2024-12-15 04:37:57,362 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-15 04:37:57,376 main DEBUG Took 0.012585 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-15 04:37:57,377 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-15 04:37:57,377 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-15 04:37:57,378 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-15 04:37:57,380 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,388 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-15 04:37:57,399 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,401 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,402 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,402 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,403 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,403 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,404 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,405 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,405 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,406 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,407 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,407 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,408 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,408 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-15 04:37:57,408 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,409 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,409 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,410 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,410 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,411 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,411 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,411 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,412 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,412 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-15 04:37:57,412 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,412 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-15 04:37:57,414 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-15 04:37:57,415 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-15 04:37:57,416 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-15 04:37:57,417 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-15 04:37:57,418 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-15 04:37:57,418 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-15 04:37:57,425 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-15 04:37:57,428 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-15 04:37:57,429 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-15 04:37:57,430 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-15 04:37:57,430 main DEBUG createAppenders(={Console}) 2024-12-15 04:37:57,431 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-12-15 04:37:57,431 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-12-15 04:37:57,431 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-12-15 04:37:57,432 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-15 04:37:57,432 main DEBUG OutputStream closed 2024-12-15 04:37:57,432 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-15 04:37:57,432 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-15 04:37:57,432 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-12-15 04:37:57,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-15 04:37:57,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-15 04:37:57,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-15 04:37:57,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-15 04:37:57,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-15 04:37:57,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-15 04:37:57,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-15 04:37:57,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-15 04:37:57,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-15 04:37:57,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-15 04:37:57,497 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-15 04:37:57,497 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-15 04:37:57,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-15 04:37:57,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-15 04:37:57,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-15 04:37:57,498 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-15 04:37:57,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-15 04:37:57,499 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-15 04:37:57,501 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-15 04:37:57,501 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-12-15 04:37:57,502 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-15 04:37:57,502 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-12-15T04:37:57,677 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190 2024-12-15 04:37:57,679 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-15 04:37:57,680 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-15T04:37:57,687 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithAdaptivePolicy timeout: 13 mins 2024-12-15T04:37:57,703 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-15T04:37:57,706 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c, deleteOnExit=true 2024-12-15T04:37:57,706 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-15T04:37:57,707 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/test.cache.data in system properties and HBase conf 2024-12-15T04:37:57,707 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/hadoop.tmp.dir in system properties and HBase conf 2024-12-15T04:37:57,708 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/hadoop.log.dir in system properties and HBase conf 2024-12-15T04:37:57,708 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-15T04:37:57,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-15T04:37:57,709 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-15T04:37:57,786 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-15T04:37:57,867 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-15T04:37:57,870 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-15T04:37:57,870 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-15T04:37:57,871 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-15T04:37:57,871 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-15T04:37:57,872 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-15T04:37:57,872 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-15T04:37:57,872 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-15T04:37:57,872 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-15T04:37:57,873 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-15T04:37:57,873 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/nfs.dump.dir in system properties and HBase conf 2024-12-15T04:37:57,873 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/java.io.tmpdir in system properties and HBase conf 2024-12-15T04:37:57,874 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-15T04:37:57,874 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-15T04:37:57,874 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-15T04:37:58,717 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-15T04:37:58,778 INFO [Time-limited test {}] log.Log(170): Logging initialized @2015ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-15T04:37:58,840 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T04:37:58,895 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-15T04:37:58,915 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-15T04:37:58,915 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-15T04:37:58,917 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-15T04:37:58,929 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T04:37:58,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/hadoop.log.dir/,AVAILABLE} 2024-12-15T04:37:58,933 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-15T04:37:59,103 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/java.io.tmpdir/jetty-localhost-37769-hadoop-hdfs-3_4_1-tests_jar-_-any-15508414273248856108/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-15T04:37:59,109 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:37769} 2024-12-15T04:37:59,109 INFO [Time-limited test {}] server.Server(415): Started @2346ms 2024-12-15T04:37:59,558 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-15T04:37:59,563 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-15T04:37:59,564 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-15T04:37:59,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-15T04:37:59,565 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-15T04:37:59,565 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/hadoop.log.dir/,AVAILABLE} 2024-12-15T04:37:59,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-15T04:37:59,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/java.io.tmpdir/jetty-localhost-45971-hadoop-hdfs-3_4_1-tests_jar-_-any-7275747295977512668/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-15T04:37:59,659 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:45971} 2024-12-15T04:37:59,659 INFO [Time-limited test {}] server.Server(415): Started @2896ms 2024-12-15T04:37:59,704 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-15T04:38:00,416 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c/dfs/data/data2/current/BP-1422360966-172.17.0.2-1734237478337/current, will proceed with Du for space computation calculation, 2024-12-15T04:38:00,416 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c/dfs/data/data1/current/BP-1422360966-172.17.0.2-1734237478337/current, will proceed with Du for space computation calculation, 2024-12-15T04:38:00,445 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-15T04:38:00,484 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2370141ed5cb5e1 with lease ID 0xf2751686fefcb47c: Processing first storage report for DS-849ae404-fa04-48e7-bb9d-32a2aedce66f from datanode DatanodeRegistration(127.0.0.1:35109, datanodeUuid=03fa4d33-7e3e-4a2e-ae82-b3f01fbb1d68, infoPort=33999, infoSecurePort=0, ipcPort=46045, storageInfo=lv=-57;cid=testClusterID;nsid=1446199436;c=1734237478337) 2024-12-15T04:38:00,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2370141ed5cb5e1 with lease ID 0xf2751686fefcb47c: from storage DS-849ae404-fa04-48e7-bb9d-32a2aedce66f node DatanodeRegistration(127.0.0.1:35109, datanodeUuid=03fa4d33-7e3e-4a2e-ae82-b3f01fbb1d68, infoPort=33999, infoSecurePort=0, ipcPort=46045, storageInfo=lv=-57;cid=testClusterID;nsid=1446199436;c=1734237478337), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-15T04:38:00,485 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd2370141ed5cb5e1 with lease ID 0xf2751686fefcb47c: Processing first storage report for DS-b3c23bf9-9c96-4d13-a6f6-3fe6779a8987 from datanode DatanodeRegistration(127.0.0.1:35109, datanodeUuid=03fa4d33-7e3e-4a2e-ae82-b3f01fbb1d68, infoPort=33999, infoSecurePort=0, ipcPort=46045, storageInfo=lv=-57;cid=testClusterID;nsid=1446199436;c=1734237478337) 2024-12-15T04:38:00,485 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd2370141ed5cb5e1 with lease ID 0xf2751686fefcb47c: from storage DS-b3c23bf9-9c96-4d13-a6f6-3fe6779a8987 node DatanodeRegistration(127.0.0.1:35109, datanodeUuid=03fa4d33-7e3e-4a2e-ae82-b3f01fbb1d68, infoPort=33999, infoSecurePort=0, ipcPort=46045, storageInfo=lv=-57;cid=testClusterID;nsid=1446199436;c=1734237478337), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-15T04:38:00,543 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190 
2024-12-15T04:38:00,606 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c/zookeeper_0, clientPort=55935, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-15T04:38:00,614 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=55935 2024-12-15T04:38:00,627 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T04:38:00,629 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T04:38:00,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741825_1001 (size=7) 2024-12-15T04:38:01,240 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 with version=8 2024-12-15T04:38:01,240 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/hbase-staging 2024-12-15T04:38:01,337 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-15T04:38:01,557 INFO [Time-limited test {}] client.ConnectionUtils(129): master/e56de37b85b3:0 server-side Connection retries=45 2024-12-15T04:38:01,571 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-15T04:38:01,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-15T04:38:01,572 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-15T04:38:01,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-15T04:38:01,572 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-15T04:38:01,673 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-15T04:38:01,724 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-15T04:38:01,731 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-15T04:38:01,734 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-15T04:38:01,755 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 43510 (auto-detected) 2024-12-15T04:38:01,756 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-15T04:38:01,772 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35185 2024-12-15T04:38:01,779 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T04:38:01,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T04:38:01,791 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35185 connecting to ZooKeeper ensemble=127.0.0.1:55935 2024-12-15T04:38:01,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:351850x0, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-15T04:38:01,862 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35185-0x10027fb030d0000 connected 2024-12-15T04:38:01,952 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-15T04:38:01,957 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T04:38:01,963 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-15T04:38:01,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35185 2024-12-15T04:38:01,969 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35185 2024-12-15T04:38:01,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35185 2024-12-15T04:38:01,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35185 2024-12-15T04:38:01,970 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35185 
2024-12-15T04:38:01,976 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9, hbase.cluster.distributed=false 2024-12-15T04:38:02,035 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/e56de37b85b3:0 server-side Connection retries=45 2024-12-15T04:38:02,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-15T04:38:02,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-15T04:38:02,036 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-15T04:38:02,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-15T04:38:02,036 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-15T04:38:02,038 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-15T04:38:02,040 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-15T04:38:02,041 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43199 2024-12-15T04:38:02,042 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-15T04:38:02,047 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-15T04:38:02,048 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T04:38:02,051 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T04:38:02,054 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:43199 connecting to ZooKeeper ensemble=127.0.0.1:55935 2024-12-15T04:38:02,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431990x0, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-15T04:38:02,067 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:431990x0, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-15T04:38:02,067 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43199-0x10027fb030d0001 connected 2024-12-15T04:38:02,068 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T04:38:02,069 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-15T04:38:02,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43199 2024-12-15T04:38:02,070 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43199 2024-12-15T04:38:02,071 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43199 2024-12-15T04:38:02,072 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43199 2024-12-15T04:38:02,073 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43199 2024-12-15T04:38:02,075 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/e56de37b85b3,35185,1734237481331 2024-12-15T04:38:02,087 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;e56de37b85b3:35185 2024-12-15T04:38:02,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T04:38:02,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T04:38:02,093 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/e56de37b85b3,35185,1734237481331 2024-12-15T04:38:02,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-15T04:38:02,119 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-15T04:38:02,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:02,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:02,120 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-15T04:38:02,121 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] 
master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/e56de37b85b3,35185,1734237481331 from backup master directory 2024-12-15T04:38:02,121 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-15T04:38:02,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/e56de37b85b3,35185,1734237481331 2024-12-15T04:38:02,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T04:38:02,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-15T04:38:02,133 WARN [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-15T04:38:02,133 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=e56de37b85b3,35185,1734237481331 2024-12-15T04:38:02,136 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-15T04:38:02,138 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-15T04:38:02,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741826_1002 (size=42) 2024-12-15T04:38:02,603 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/hbase.id with ID: 98ac0dde-cdd1-426c-a504-20202c03b673 2024-12-15T04:38:02,642 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-15T04:38:02,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:02,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:02,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741827_1003 (size=196) 2024-12-15T04:38:03,139 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-12-15T04:38:03,140 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-12-15T04:38:03,157 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-15T04:38:03,161 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-12-15T04:38:03,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741828_1004 (size=1189)
2024-12-15T04:38:03,611 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store
2024-12-15T04:38:03,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741829_1005 (size=34)
2024-12-15T04:38:04,036 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-12-15T04:38:04,037 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:04,038 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-15T04:38:04,038 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T04:38:04,038 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T04:38:04,038 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-15T04:38:04,038 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T04:38:04,038 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T04:38:04,038 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-15T04:38:04,040 WARN [master/e56de37b85b3:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/.initializing 2024-12-15T04:38:04,040 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/WALs/e56de37b85b3,35185,1734237481331 2024-12-15T04:38:04,046 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-15T04:38:04,055 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e56de37b85b3%2C35185%2C1734237481331, suffix=, logDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/WALs/e56de37b85b3,35185,1734237481331, archiveDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/oldWALs, maxLogs=10 2024-12-15T04:38:04,072 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/WALs/e56de37b85b3,35185,1734237481331/e56de37b85b3%2C35185%2C1734237481331.1734237484059, exclude list is [], retry=0 2024-12-15T04:38:04,086 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35109,DS-849ae404-fa04-48e7-bb9d-32a2aedce66f,DISK] 2024-12-15T04:38:04,089 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-15T04:38:04,121 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/WALs/e56de37b85b3,35185,1734237481331/e56de37b85b3%2C35185%2C1734237481331.1734237484059 2024-12-15T04:38:04,122 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33999:33999)] 2024-12-15T04:38:04,123 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:38:04,123 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:04,127 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,129 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,168 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-15T04:38:04,193 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:04,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T04:38:04,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,199 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-15T04:38:04,200 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:04,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:04,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,204 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-15T04:38:04,204 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:04,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:04,206 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,208 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-15T04:38:04,208 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:04,209 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:04,213 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,214 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,222 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-15T04:38:04,225 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-15T04:38:04,230 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:38:04,231 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70903622, jitterRate=0.056546300649642944}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-15T04:38:04,234 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-15T04:38:04,235 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-15T04:38:04,259 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769bc9a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:04,284 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
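
The FlushLargeStoresPolicy entry above reports that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the master:store table descriptor, so the region falls back to memstore-flush-size divided by the number of families (32.0 M here). As a hedged illustration only, the sketch below sets that key as a table-level value when creating a hypothetical user table through the HBase 2.x client API; the table name and the 16 MB threshold are invented for the example and do not appear in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableWithFlushLowerBound {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // 'demo' and the 16 MB value are illustrative assumptions.
          TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
              // Key checked by FlushLargeStoresPolicy, per the log entry above.
              .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(16L * 1024 * 1024))
              .build();
          admin.createTable(td);
        }
      }
    }
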
2024-12-15T04:38:04,293 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-15T04:38:04,293 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-15T04:38:04,295 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-15T04:38:04,296 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-15T04:38:04,300 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 3 msec 2024-12-15T04:38:04,300 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-15T04:38:04,321 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-15T04:38:04,331 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-15T04:38:04,349 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-15T04:38:04,351 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-15T04:38:04,352 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-15T04:38:04,416 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-15T04:38:04,419 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-15T04:38:04,427 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-15T04:38:04,614 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-15T04:38:04,617 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-15T04:38:04,674 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-15T04:38:04,691 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-15T04:38:04,698 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-15T04:38:04,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-15T04:38:04,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-15T04:38:04,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:04,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:04,712 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=e56de37b85b3,35185,1734237481331, sessionid=0x10027fb030d0000, setting cluster-up flag (Was=false) 2024-12-15T04:38:04,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:04,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:04,766 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-15T04:38:04,769 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e56de37b85b3,35185,1734237481331 2024-12-15T04:38:04,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:04,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:04,824 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-15T04:38:04,825 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=e56de37b85b3,35185,1734237481331 2024-12-15T04:38:04,889 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;e56de37b85b3:43199 2024-12-15T04:38:04,890 INFO 
[RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1008): ClusterId : 98ac0dde-cdd1-426c-a504-20202c03b673 2024-12-15T04:38:04,893 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-15T04:38:04,904 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-15T04:38:04,904 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-15T04:38:04,904 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-15T04:38:04,910 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-15T04:38:04,912 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-15T04:38:04,912 DEBUG [RS:0;e56de37b85b3:43199 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ce87ca1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:04,913 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-15T04:38:04,914 DEBUG [RS:0;e56de37b85b3:43199 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29e856c4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e56de37b85b3/172.17.0.2:0 2024-12-15T04:38:04,916 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-15T04:38:04,916 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-15T04:38:04,916 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1090): About to register with Master. 
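
At this point the region server is about to register with the master (reportForDuty), and shortly afterwards the master records it as live. A small sketch, assuming an hbase-site.xml or explicit quorum settings pointing at this cluster, that asks the master for its name and live-server list through the Admin API; the quorum host and port are taken from the log and are only reachable while the test cluster runs.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ShowClusterMembers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum values copied from the log; an assumption outside this test run.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "55935");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("Active master: " + metrics.getMasterName());
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            // Expected output would name e56de37b85b3,43199,1734237482035 once registration completes.
            System.out.println("Live region server: " + rs);
          }
        }
      }
    }
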
2024-12-15T04:38:04,918 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(3073): reportForDuty to master=e56de37b85b3,35185,1734237481331 with isa=e56de37b85b3/172.17.0.2:43199, startcode=1734237482035 2024-12-15T04:38:04,917 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: e56de37b85b3,35185,1734237481331 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-15T04:38:04,920 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/e56de37b85b3:0, corePoolSize=5, maxPoolSize=5 2024-12-15T04:38:04,920 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/e56de37b85b3:0, corePoolSize=5, maxPoolSize=5 2024-12-15T04:38:04,920 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/e56de37b85b3:0, corePoolSize=5, maxPoolSize=5 2024-12-15T04:38:04,921 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/e56de37b85b3:0, corePoolSize=5, maxPoolSize=5 2024-12-15T04:38:04,921 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/e56de37b85b3:0, corePoolSize=10, maxPoolSize=10 2024-12-15T04:38:04,921 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:04,921 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/e56de37b85b3:0, corePoolSize=2, maxPoolSize=2 2024-12-15T04:38:04,921 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:04,924 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1734237514924 2024-12-15T04:38:04,926 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-15T04:38:04,927 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-15T04:38:04,927 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-15T04:38:04,927 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-15T04:38:04,929 DEBUG [RS:0;e56de37b85b3:43199 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T04:38:04,931 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:04,931 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-15T04:38:04,931 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-15T04:38:04,931 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-15T04:38:04,932 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-15T04:38:04,932 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-15T04:38:04,933 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
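
The FSTableDescriptors entry above writes the hbase:meta table descriptor with the 'info', 'rep_barrier' and 'table' families (ROW_INDEX_V1 encoding, ROWCOL bloom filters, IN_MEMORY). A hedged sketch of reading that descriptor back through the client API, assuming a configured connection to a running cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class DumpMetaDescriptor {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
          for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
            // Expect info, rep_barrier and table, matching the descriptor logged above.
            System.out.println(cf.getNameAsString()
                + " encoding=" + cf.getDataBlockEncoding()
                + " blocksize=" + cf.getBlocksize()
                + " inMemory=" + cf.isInMemory());
          }
        }
      }
    }
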
2024-12-15T04:38:04,935 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-15T04:38:04,937 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-15T04:38:04,937 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-15T04:38:04,940 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-15T04:38:04,940 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-15T04:38:04,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741831_1007 (size=1039) 2024-12-15T04:38:04,942 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/e56de37b85b3:0:becomeActiveMaster-HFileCleaner.large.0-1734237484942,5,FailOnTimeoutGroup] 2024-12-15T04:38:04,942 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/e56de37b85b3:0:becomeActiveMaster-HFileCleaner.small.0-1734237484942,5,FailOnTimeoutGroup] 2024-12-15T04:38:04,942 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:04,942 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-15T04:38:04,943 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:04,944 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-15T04:38:04,962 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46433, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T04:38:04,967 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35185 {}] master.ServerManager(332): Checking decommissioned status of RegionServer e56de37b85b3,43199,1734237482035 2024-12-15T04:38:04,969 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35185 {}] master.ServerManager(486): Registering regionserver=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:04,980 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:38:04,980 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:35921 2024-12-15T04:38:04,980 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-15T04:38:04,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-15T04:38:04,991 DEBUG [RS:0;e56de37b85b3:43199 {}] zookeeper.ZKUtil(111): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/e56de37b85b3,43199,1734237482035 2024-12-15T04:38:04,991 WARN [RS:0;e56de37b85b3:43199 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-15T04:38:04,991 INFO [RS:0;e56de37b85b3:43199 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-15T04:38:04,992 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/WALs/e56de37b85b3,43199,1734237482035 2024-12-15T04:38:04,993 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [e56de37b85b3,43199,1734237482035] 2024-12-15T04:38:05,006 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-15T04:38:05,015 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-15T04:38:05,025 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-15T04:38:05,027 INFO [RS:0;e56de37b85b3:43199 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-15T04:38:05,027 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-12-15T04:38:05,028 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-15T04:38:05,034 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:05,034 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,034 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,034 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,034 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,034 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/e56de37b85b3:0, corePoolSize=2, maxPoolSize=2 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/e56de37b85b3:0, corePoolSize=1, maxPoolSize=1 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/e56de37b85b3:0, corePoolSize=3, maxPoolSize=3 2024-12-15T04:38:05,035 DEBUG [RS:0;e56de37b85b3:43199 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0, corePoolSize=3, maxPoolSize=3 2024-12-15T04:38:05,036 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:05,036 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:05,036 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-12-15T04:38:05,037 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:05,037 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,43199,1734237482035-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-15T04:38:05,052 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-15T04:38:05,053 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,43199,1734237482035-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:05,069 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.Replication(204): e56de37b85b3,43199,1734237482035 started 2024-12-15T04:38:05,069 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1767): Serving as e56de37b85b3,43199,1734237482035, RpcServer on e56de37b85b3/172.17.0.2:43199, sessionid=0x10027fb030d0001 2024-12-15T04:38:05,069 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-15T04:38:05,070 DEBUG [RS:0;e56de37b85b3:43199 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager e56de37b85b3,43199,1734237482035 2024-12-15T04:38:05,070 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e56de37b85b3,43199,1734237482035' 2024-12-15T04:38:05,070 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-15T04:38:05,071 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-15T04:38:05,071 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-15T04:38:05,071 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-15T04:38:05,071 DEBUG [RS:0;e56de37b85b3:43199 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager e56de37b85b3,43199,1734237482035 2024-12-15T04:38:05,071 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'e56de37b85b3,43199,1734237482035' 2024-12-15T04:38:05,071 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-15T04:38:05,072 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-15T04:38:05,073 DEBUG [RS:0;e56de37b85b3:43199 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-15T04:38:05,073 INFO [RS:0;e56de37b85b3:43199 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-15T04:38:05,073 INFO [RS:0;e56de37b85b3:43199 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
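
The procedure-manager entries above watch znodes such as /hbase/flush-table-proc/acquired and /hbase/online-snapshot/abort on the quorum at 127.0.0.1:55935. For illustration only, a minimal raw-ZooKeeper sketch that lists the children of the /hbase base znode; the connection string comes from the log, and the 30-second session timeout is an arbitrary choice made for the example.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListHBaseZnodes {
      public static void main(String[] args) throws Exception {
        // Quorum address from the log; an assumption outside this test run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:55935", 30_000, event -> { });
        try {
          List<String> children = zk.getChildren("/hbase", false);
          // Expect entries like rs, running, meta-region-server,
          // flush-table-proc and online-snapshot, as seen in the log.
          for (String child : children) {
            System.out.println("/hbase/" + child);
          }
        } finally {
          zk.close();
        }
      }
    }
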
2024-12-15T04:38:05,186 INFO [RS:0;e56de37b85b3:43199 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-15T04:38:05,190 INFO [RS:0;e56de37b85b3:43199 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e56de37b85b3%2C43199%2C1734237482035, suffix=, logDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/WALs/e56de37b85b3,43199,1734237482035, archiveDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/oldWALs, maxLogs=32 2024-12-15T04:38:05,202 DEBUG [RS:0;e56de37b85b3:43199 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/WALs/e56de37b85b3,43199,1734237482035/e56de37b85b3%2C43199%2C1734237482035.1734237485191, exclude list is [], retry=0 2024-12-15T04:38:05,207 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35109,DS-849ae404-fa04-48e7-bb9d-32a2aedce66f,DISK] 2024-12-15T04:38:05,210 INFO [RS:0;e56de37b85b3:43199 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/WALs/e56de37b85b3,43199,1734237482035/e56de37b85b3%2C43199%2C1734237482035.1734237485191 2024-12-15T04:38:05,210 DEBUG [RS:0;e56de37b85b3:43199 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33999:33999)] 2024-12-15T04:38:05,346 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-15T04:38:05,347 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:38:05,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741833_1009 (size=32) 2024-12-15T04:38:05,760 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:05,762 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-15T04:38:05,766 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-15T04:38:05,767 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:05,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T04:38:05,768 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-15T04:38:05,772 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-15T04:38:05,772 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:05,773 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T04:38:05,774 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-12-15T04:38:05,777 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-15T04:38:05,777 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:05,778 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T04:38:05,780 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740 2024-12-15T04:38:05,782 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740 2024-12-15T04:38:05,785 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-12-15T04:38:05,788 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-15T04:38:05,792 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:38:05,793 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58799901, jitterRate=-0.123813197016716}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:38:05,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-15T04:38:05,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-15T04:38:05,797 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-15T04:38:05,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-15T04:38:05,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-15T04:38:05,797 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-15T04:38:05,798 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-15T04:38:05,798 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-15T04:38:05,801 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-15T04:38:05,801 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-15T04:38:05,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-15T04:38:05,813 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-15T04:38:05,815 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-15T04:38:05,969 DEBUG [e56de37b85b3:35185 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-15T04:38:05,980 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:05,989 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e56de37b85b3,43199,1734237482035, state=OPENING 2024-12-15T04:38:06,024 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-15T04:38:06,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:06,032 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:06,033 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T04:38:06,033 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T04:38:06,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:38:06,216 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:06,217 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-15T04:38:06,220 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40446, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-15T04:38:06,230 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-15T04:38:06,231 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-15T04:38:06,231 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-15T04:38:06,234 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=e56de37b85b3%2C43199%2C1734237482035.meta, suffix=.meta, logDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/WALs/e56de37b85b3,43199,1734237482035, archiveDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/oldWALs, maxLogs=32 2024-12-15T04:38:06,247 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/WALs/e56de37b85b3,43199,1734237482035/e56de37b85b3%2C43199%2C1734237482035.meta.1734237486235.meta, exclude list is [], retry=0 2024-12-15T04:38:06,250 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:35109,DS-849ae404-fa04-48e7-bb9d-32a2aedce66f,DISK] 2024-12-15T04:38:06,253 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/WALs/e56de37b85b3,43199,1734237482035/e56de37b85b3%2C43199%2C1734237482035.meta.1734237486235.meta 2024-12-15T04:38:06,253 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:33999:33999)] 2024-12-15T04:38:06,253 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:38:06,254 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-15T04:38:06,299 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-15T04:38:06,302 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-15T04:38:06,305 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-15T04:38:06,305 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:06,306 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-15T04:38:06,306 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-15T04:38:06,308 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-15T04:38:06,310 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-15T04:38:06,310 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:06,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T04:38:06,311 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-15T04:38:06,312 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-15T04:38:06,312 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:06,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T04:38:06,313 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-15T04:38:06,314 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-15T04:38:06,314 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:06,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-15T04:38:06,316 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740 2024-12-15T04:38:06,319 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740 2024-12-15T04:38:06,321 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:38:06,323 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-15T04:38:06,325 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63378134, jitterRate=-0.05559220910072327}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:38:06,326 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-15T04:38:06,333 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1734237486212 2024-12-15T04:38:06,343 DEBUG [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-15T04:38:06,343 INFO [RS_OPEN_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-15T04:38:06,344 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:06,346 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as e56de37b85b3,43199,1734237482035, state=OPEN 2024-12-15T04:38:06,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-15T04:38:06,406 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-15T04:38:06,406 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T04:38:06,406 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-15T04:38:06,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-12-15T04:38:06,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=e56de37b85b3,43199,1734237482035 in 371 msec 2024-12-15T04:38:06,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-15T04:38:06,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 609 msec 2024-12-15T04:38:06,428 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5660 sec 2024-12-15T04:38:06,428 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1734237486428, completionTime=-1 2024-12-15T04:38:06,428 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-12-15T04:38:06,429 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-15T04:38:06,461 DEBUG [hconnection-0xf95a041-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:06,463 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40452, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:06,472 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-12-15T04:38:06,472 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1734237546472 2024-12-15T04:38:06,472 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1734237606472 2024-12-15T04:38:06,473 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 44 msec 2024-12-15T04:38:06,518 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,35185,1734237481331-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:06,518 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,35185,1734237481331-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:06,518 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,35185,1734237481331-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:06,519 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-e56de37b85b3:35185, period=300000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:06,520 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-15T04:38:06,525 DEBUG [master/e56de37b85b3:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-15T04:38:06,528 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
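The chores enabled above run on fixed periods (BalancerChore and RegionNormalizerChore every 300000 ms, CatalogJanitor every 300000 ms, HbckChore every 3600000 ms). A minimal sketch of how such periods are typically tuned follows; the property names are assumed from HBase documentation defaults and are not read from this test's configuration, and the values simply mirror the periods printed in the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // Assumed property names; values mirror the chore periods logged above.
    conf.setInt("hbase.balancer.period", 300000);              // BalancerChore
    conf.setInt("hbase.normalizer.period", 300000);            // RegionNormalizerChore
    conf.setInt("hbase.catalogjanitor.interval", 300000);      // CatalogJanitor
    conf.setInt("hbase.master.hbck.chore.interval", 3600000);  // HbckChore
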
2024-12-15T04:38:06,529 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-15T04:38:06,535 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-15T04:38:06,537 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T04:38:06,538 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:06,540 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T04:38:06,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741835_1011 (size=358) 2024-12-15T04:38:06,961 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7a581d77bd6cf0246603236a6705aded, NAME => 'hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:38:06,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741836_1012 (size=42) 2024-12-15T04:38:07,373 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:07,373 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 7a581d77bd6cf0246603236a6705aded, disabling compactions & flushes 2024-12-15T04:38:07,373 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:38:07,373 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:38:07,374 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 
after waiting 0 ms 2024-12-15T04:38:07,374 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:38:07,374 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:38:07,374 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7a581d77bd6cf0246603236a6705aded: 2024-12-15T04:38:07,378 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T04:38:07,386 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1734237487379"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734237487379"}]},"ts":"1734237487379"} 2024-12-15T04:38:07,405 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T04:38:07,407 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T04:38:07,409 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237487407"}]},"ts":"1734237487407"} 2024-12-15T04:38:07,413 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-15T04:38:07,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=7a581d77bd6cf0246603236a6705aded, ASSIGN}] 2024-12-15T04:38:07,464 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=7a581d77bd6cf0246603236a6705aded, ASSIGN 2024-12-15T04:38:07,467 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=7a581d77bd6cf0246603236a6705aded, ASSIGN; state=OFFLINE, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=false 2024-12-15T04:38:07,618 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=7a581d77bd6cf0246603236a6705aded, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:07,627 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 7a581d77bd6cf0246603236a6705aded, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:38:07,783 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:07,795 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:38:07,795 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 7a581d77bd6cf0246603236a6705aded, NAME => 'hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:38:07,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:38:07,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:07,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:38:07,796 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:38:07,799 INFO [StoreOpener-7a581d77bd6cf0246603236a6705aded-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:38:07,801 INFO [StoreOpener-7a581d77bd6cf0246603236a6705aded-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7a581d77bd6cf0246603236a6705aded columnFamilyName info 2024-12-15T04:38:07,802 DEBUG [StoreOpener-7a581d77bd6cf0246603236a6705aded-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:07,803 INFO [StoreOpener-7a581d77bd6cf0246603236a6705aded-1 {}] regionserver.HStore(327): Store=7a581d77bd6cf0246603236a6705aded/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:07,805 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded 2024-12-15T04:38:07,806 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded 2024-12-15T04:38:07,811 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:38:07,816 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:38:07,817 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 7a581d77bd6cf0246603236a6705aded; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67093629, jitterRate=-2.2701919078826904E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-15T04:38:07,819 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 7a581d77bd6cf0246603236a6705aded: 2024-12-15T04:38:07,821 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded., pid=6, masterSystemTime=1734237487783 2024-12-15T04:38:07,824 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:38:07,825 INFO [RS_OPEN_PRIORITY_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 
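The "Opened 7a581d77bd6cf0246603236a6705aded" line above prints desiredMaxFileSize=67093629 together with jitterRate=-2.2701919078826904E-4. Those two numbers are consistent with a 64 MB (67108864-byte) base split size with the jitter applied multiplicatively; a back-of-the-envelope check, where the 64 MB base is an assumption that happens to fit every desiredMaxFileSize value in this log rather than a value read from the test configuration:

    long assumedBase = 64L * 1024 * 1024;          // 67108864 bytes; assumed base split size
    double jitterRate = -2.2701919078826904E-4;    // printed in the log line above
    long desired = assumedBase + (long) (assumedBase * jitterRate);
    // desired comes out at ~67093629 bytes, the desiredMaxFileSize logged for this region
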
2024-12-15T04:38:07,825 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=7a581d77bd6cf0246603236a6705aded, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:07,832 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-15T04:38:07,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 7a581d77bd6cf0246603236a6705aded, server=e56de37b85b3,43199,1734237482035 in 201 msec 2024-12-15T04:38:07,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-15T04:38:07,836 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=7a581d77bd6cf0246603236a6705aded, ASSIGN in 373 msec 2024-12-15T04:38:07,838 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T04:38:07,838 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237487838"}]},"ts":"1734237487838"} 2024-12-15T04:38:07,845 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-15T04:38:07,885 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T04:38:07,889 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3560 sec 2024-12-15T04:38:07,940 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-15T04:38:07,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-15T04:38:07,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:07,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:38:07,977 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-15T04:38:07,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-15T04:38:08,013 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 36 msec 2024-12-15T04:38:08,022 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-15T04:38:08,040 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-15T04:38:08,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 29 msec 2024-12-15T04:38:08,082 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-15T04:38:08,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-15T04:38:08,099 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.966sec 2024-12-15T04:38:08,101 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-15T04:38:08,104 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-15T04:38:08,105 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-15T04:38:08,106 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-15T04:38:08,106 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-15T04:38:08,107 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,35185,1734237481331-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-15T04:38:08,108 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,35185,1734237481331-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-15T04:38:08,116 DEBUG [master/e56de37b85b3:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-15T04:38:08,117 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-15T04:38:08,117 INFO [master/e56de37b85b3:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=e56de37b85b3,35185,1734237481331-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
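With the default and hbase namespaces created and the master reporting "has completed initialization", a client can confirm the cluster state through the standard Admin API; the following is an illustrative sketch, not part of the test itself:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());  // expect "default" and "hbase" at this point in the log
      }
    }
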
2024-12-15T04:38:08,196 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e83c466 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39dee83f 2024-12-15T04:38:08,197 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-15T04:38:08,209 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67b8b597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:08,211 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-15T04:38:08,211 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-15T04:38:08,220 DEBUG [hconnection-0x4c09ef46-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:08,228 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:08,235 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=e56de37b85b3,35185,1734237481331 2024-12-15T04:38:08,246 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=86, ProcessCount=11, AvailableMemoryMB=5226 2024-12-15T04:38:08,268 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T04:38:08,271 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57756, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T04:38:08,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
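The WARN above flags ZKConnectionRegistry as deprecated and points at the client.rpcconnectionregistry section of the HBase book. A sketch of the replacement client setup, assuming the property names documented there (this test itself keeps the ZooKeeper registry, and the host:port value below is purely illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    Configuration conf = HBaseConfiguration.create();
    // Assumed property names for the RPC-based connection registry:
    conf.set("hbase.client.registry.impl",
        "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
    conf.set("hbase.client.bootstrap.servers", "e56de37b85b3:16020");  // illustrative endpoint list
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // the client no longer needs direct ZooKeeper access for bootstrap
    }
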
2024-12-15T04:38:08,281 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:38:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:08,285 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T04:38:08,285 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:08,285 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-12-15T04:38:08,290 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T04:38:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T04:38:08,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741837_1013 (size=963) 2024-12-15T04:38:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T04:38:08,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T04:38:08,704 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:38:08,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741838_1014 (size=53) 2024-12-15T04:38:08,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T04:38:09,116 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:09,116 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 6443b0fc7191a86cb86de2a8c7e17f47, disabling compactions & flushes 2024-12-15T04:38:09,116 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:09,116 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:09,116 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. after waiting 0 ms 2024-12-15T04:38:09,116 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:09,116 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
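The descriptor echoed for pid=9 above (three column families A, B and C with VERSIONS=1, BLOCKSIZE=65536, and the table attribute hbase.hregion.compacting.memstore.type=ADAPTIVE) could be expressed from a client roughly as follows. This is a sketch of an equivalent Admin call using the HBase 2.x builder API, not the test's own helper code:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // table-level attribute seen in the logged descriptor
        .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)           // VERSIONS => '1'
          .setBlocksize(64 * 1024)     // BLOCKSIZE => '65536 B (64KB)'
          .build());
    }
    admin.createTable(builder.build());  // admin obtained from an existing Connection
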
2024-12-15T04:38:09,117 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:09,119 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T04:38:09,119 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734237489119"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734237489119"}]},"ts":"1734237489119"} 2024-12-15T04:38:09,123 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T04:38:09,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T04:38:09,125 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237489125"}]},"ts":"1734237489125"} 2024-12-15T04:38:09,129 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-15T04:38:09,174 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6443b0fc7191a86cb86de2a8c7e17f47, ASSIGN}] 2024-12-15T04:38:09,178 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6443b0fc7191a86cb86de2a8c7e17f47, ASSIGN 2024-12-15T04:38:09,181 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=6443b0fc7191a86cb86de2a8c7e17f47, ASSIGN; state=OFFLINE, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=false 2024-12-15T04:38:09,333 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6443b0fc7191a86cb86de2a8c7e17f47, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:09,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:38:09,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T04:38:09,497 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:09,509 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
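After CREATE_TABLE_ADD_TO_META, the regioninfo and state cells shown in the Put above live in hbase:meta and can be inspected from any client; a small illustrative scan (assumed usage, not part of the test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.ResultScanner;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // connection: an existing org.apache.hadoop.hbase.client.Connection
    try (Table meta = connection.getTable(TableName.META_TABLE_NAME);
         ResultScanner scanner = meta.getScanner(
             new Scan().setRowPrefixFilter(Bytes.toBytes("TestAcidGuarantees,")))) {
      for (Result row : scanner) {
        System.out.println(row);  // regioninfo and state columns written by the procedure
      }
    }
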
2024-12-15T04:38:09,509 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:38:09,510 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,510 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:09,510 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,510 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,512 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,516 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:09,516 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6443b0fc7191a86cb86de2a8c7e17f47 columnFamilyName A 2024-12-15T04:38:09,516 DEBUG [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:09,518 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.HStore(327): Store=6443b0fc7191a86cb86de2a8c7e17f47/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:09,518 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,520 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:09,520 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6443b0fc7191a86cb86de2a8c7e17f47 columnFamilyName B 2024-12-15T04:38:09,520 DEBUG [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:09,521 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.HStore(327): Store=6443b0fc7191a86cb86de2a8c7e17f47/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:09,522 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,524 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:09,524 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6443b0fc7191a86cb86de2a8c7e17f47 columnFamilyName C 2024-12-15T04:38:09,524 DEBUG [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:09,525 INFO [StoreOpener-6443b0fc7191a86cb86de2a8c7e17f47-1 {}] regionserver.HStore(327): Store=6443b0fc7191a86cb86de2a8c7e17f47/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:09,525 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:09,527 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,528 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,530 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:38:09,533 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:09,536 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:38:09,537 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 6443b0fc7191a86cb86de2a8c7e17f47; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72909582, jitterRate=0.08643743395805359}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:38:09,538 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:09,540 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., pid=11, masterSystemTime=1734237489496 2024-12-15T04:38:09,542 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:09,542 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
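The DEBUG message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the TestAcidGuarantees descriptor, so the region falls back to its memstore flush heap size divided by the number of families (16.0 M here, matching flushSizeLowerBound=16777216 in the "Opened" line). If that bound were to be pinned explicitly it could be set on the table descriptor; the sketch below only mirrors the computed fallback value and is not taken from the test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    TableName name = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(name);   // admin from an existing Connection
    TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))              // 16 MB, the fallback seen in the log
        .build();
    admin.modifyTable(updated);
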
2024-12-15T04:38:09,543 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=6443b0fc7191a86cb86de2a8c7e17f47, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:09,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-15T04:38:09,548 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 in 205 msec 2024-12-15T04:38:09,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-15T04:38:09,552 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6443b0fc7191a86cb86de2a8c7e17f47, ASSIGN in 374 msec 2024-12-15T04:38:09,553 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T04:38:09,553 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237489553"}]},"ts":"1734237489553"} 2024-12-15T04:38:09,556 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-15T04:38:09,567 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T04:38:09,570 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2860 sec 2024-12-15T04:38:10,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-15T04:38:10,416 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-12-15T04:38:10,423 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e98ea32 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b9fcedf 2024-12-15T04:38:10,458 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e71e468, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,461 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,463 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,467 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T04:38:10,470 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44402, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T04:38:10,478 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12885408 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9bd0964 2024-12-15T04:38:10,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c63ae4e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,492 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72b32f98 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1324ee83 2024-12-15T04:38:10,503 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@736f1673, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,505 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04977266 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45b55c24 2024-12-15T04:38:10,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ee2166f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bbb5d8a to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@48068a5 2024-12-15T04:38:10,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f34ff67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,530 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18603bb9 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3883f7b 2024-12-15T04:38:10,541 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b5f27aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,543 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72e97e4b to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12a1285d 2024-12-15T04:38:10,553 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c3b736e, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,555 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-12-15T04:38:10,566 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,568 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c8de680 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47fe2fa7 2024-12-15T04:38:10,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6502d571, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,580 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f6b07e3 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@595e9ebe 2024-12-15T04:38:10,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a0471b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:10,597 DEBUG [hconnection-0x63f71b8d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,597 DEBUG [hconnection-0x1b94e7c8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,597 DEBUG [hconnection-0x76910286-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,598 DEBUG [hconnection-0x5fa3171e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,598 DEBUG [hconnection-0x1a036934-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,598 DEBUG [hconnection-0x54e848df-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,599 DEBUG [hconnection-0x460db217-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,599 DEBUG [hconnection-0x13693b96-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,600 DEBUG [hconnection-0x53bc7585-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:10,600 INFO 
[RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,601 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,601 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46212, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,601 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,602 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46222, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,603 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:10,603 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46234, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,605 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,605 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46250, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,605 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:10,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-12-15T04:38:10,613 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:10,616 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:10,617 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:10,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T04:38:10,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:10,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:38:10,696 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:10,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:10,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:10,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:10,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:10,698 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:10,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T04:38:10,786 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-15T04:38:10,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:10,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:10,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:10,804 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
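Up to this point the run wires several test clients to the mini-cluster ZooKeeper at 127.0.0.1:55935, opens RPC connections to the master (MasterService) and region server (ClientService), and then the jenkins client asks the master to flush TestAcidGuarantees; the master stores this as FlushTableProcedure pid=12 with a FlushRegionProcedure child pid=13, while MemStoreFlusher.0 has already started its own flush of region 6443b0fc7191a86cb86de2a8c7e17f47. For orientation, the following is a minimal sketch of what such a flush request looks like through the public client API, assuming a plain Java client; it is not the test's actual code, the class name is invented, and the quorum address and port are simply the ones this particular run happened to use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Mini-cluster ZooKeeper endpoint and session timeout as seen in the log above.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 55935);
        conf.setInt("zookeeper.session.timeout", 90000);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Admin.flush submits a flush-table procedure on the master and waits for it
          // to finish; the repeated "Checking to see if procedure is done pid=12"
          // entries are the master answering exactly that kind of polling.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }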
2024-12-15T04:38:10,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:10,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/ca561e8d1c764e059f9537a5e5fae500 is 50, key is test_row_0/A:col10/1734237490677/Put/seqid=0 2024-12-15T04:38:10,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:10,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237550825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237550826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,841 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237550834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237550834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,843 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237550837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741839_1015 (size=12001) 2024-12-15T04:38:10,858 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/ca561e8d1c764e059f9537a5e5fae500 2024-12-15T04:38:10,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T04:38:10,972 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-15T04:38:10,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237550965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
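While the region sits above its 512 K memstore limit, each incoming Mutate is rejected with RegionTooBusyException, which is what the repeated HRegion.checkResources / CallRunner entries record. A schematic writer that backs off when that happens might look like the sketch below; it is an illustration only, since the real HBase client already retries such failures internally according to its retry and pause settings. The class and method names are invented, and the row, family, and qualifier values are just the ones this test writes (test_row_0, families A/B/C, column col10).

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriterSketch {
      // Retry a put a bounded number of times, backing off while the region is blocked.
      static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        long pauseMs = 100;
        for (int attempt = 0; attempt < 10; attempt++) {
          try {
            table.put(put);
            return;
          } catch (IOException e) {
            // RegionTooBusyException (as logged above) extends IOException; whether it
            // reaches the caller directly or wrapped depends on client retry settings,
            // so this sketch simply sleeps and tries again while the memstore drains.
            Thread.sleep(pauseMs);
            pauseMs = Math.min(pauseMs * 2, 5_000);
          }
        }
        throw new IOException("region still too busy after repeated attempts");
      }

      static void writeRow(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          putWithBackoff(table, put);
        }
      }
    }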
2024-12-15T04:38:10,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237550966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:10,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:10,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:10,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:10,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237550966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,980 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
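The 512.0 K figure in these RegionTooBusyException messages is the region's blocking threshold: the memstore flush size multiplied by the blocking multiplier, which HRegion.checkResources compares against the current memstore size. The defaults are 128 MB and 4, so a 512 K limit indicates the test shrinks the flush size drastically to force constant flushing and write blocking. The configuration sketch below would produce the same limit; the values are an assumption chosen to reproduce 128 KB * 4 = 512 KB, not necessarily what TestAcidGuarantees itself sets.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        // 128 KB flush size with the default multiplier of 4 gives a 512 KB blocking
        // limit (the production defaults are 128 MB and 4). Illustrative values only.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }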
2024-12-15T04:38:10,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237550965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:10,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237550973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:10,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c23dd2cd6e544ed884327e48a89722e4 is 50, key is test_row_0/B:col10/1734237490677/Put/seqid=0 2024-12-15T04:38:11,010 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-15T04:38:11,012 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-15T04:38:11,013 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-15T04:38:11,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741840_1016 (size=12001) 2024-12-15T04:38:11,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c23dd2cd6e544ed884327e48a89722e4 2024-12-15T04:38:11,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/11c541a1a63c41d79e75aae1ab212f71 is 50, key is test_row_0/C:col10/1734237490677/Put/seqid=0 2024-12-15T04:38:11,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741841_1017 (size=12001) 2024-12-15T04:38:11,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/11c541a1a63c41d79e75aae1ab212f71 2024-12-15T04:38:11,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/ca561e8d1c764e059f9537a5e5fae500 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/ca561e8d1c764e059f9537a5e5fae500 2024-12-15T04:38:11,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/ca561e8d1c764e059f9537a5e5fae500, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:38:11,134 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-15T04:38:11,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:11,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
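The "Committing ... .tmp/A/... as .../A/..." entries show the flush's two-step commit: each column family's output is first written to a file under the region's .tmp directory and only then moved into the store directory, after which HStore registers it (entries=150, sequenceid=14, filesize=11.7 K). Below is a stripped-down illustration of that write-then-rename pattern on the Hadoop FileSystem API; it is not HBase's HRegionFileSystem code, which additionally validates the file and updates store bookkeeping, and the class name and paths are placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitTmpFileSketch {
      // Move a finished flush output from the region's .tmp area into the store directory.
      public static Path commit(Configuration conf, Path tmpFile, Path storeDir) throws Exception {
        FileSystem fs = tmpFile.getFileSystem(conf);
        Path dest = new Path(storeDir, tmpFile.getName());
        // The rename makes the completed HFile visible under the store in one step,
        // so readers never observe a partially written flush output.
        if (!fs.rename(tmpFile, dest)) {
          throw new java.io.IOException("could not move " + tmpFile + " to " + dest);
        }
        return dest;
      }
    }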
2024-12-15T04:38:11,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c23dd2cd6e544ed884327e48a89722e4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c23dd2cd6e544ed884327e48a89722e4 2024-12-15T04:38:11,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
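Note on the totals reported just below: the master keeps re-dispatching FlushRegionProcedure pid=13 and receiving "Unable to complete flush" back only while MemStoreFlusher.0's own flush is still in progress. That flush then completes with dataSize ~67.09 KB (68,700 bytes), which is exactly the sum of the three per-family flushes recorded in the preceding entries, 22.36 KB (22,900 bytes) each for stores A, B and C (3 x 22,900 = 68,700), each persisted as a ~11.7 K HFile holding 150 cells at sequenceid=14.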
2024-12-15T04:38:11,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c23dd2cd6e544ed884327e48a89722e4, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:38:11,154 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/11c541a1a63c41d79e75aae1ab212f71 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/11c541a1a63c41d79e75aae1ab212f71 2024-12-15T04:38:11,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/11c541a1a63c41d79e75aae1ab212f71, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:38:11,170 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6443b0fc7191a86cb86de2a8c7e17f47 in 483ms, sequenceid=14, compaction requested=false 2024-12-15T04:38:11,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:11,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:11,195 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:38:11,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:11,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:11,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:11,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:11,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:11,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:11,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/fd1e2eeb276e415295e07f9c004adc1d is 50, key is test_row_0/A:col10/1734237490831/Put/seqid=0 2024-12-15T04:38:11,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T04:38:11,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741842_1018 (size=14341) 2024-12-15T04:38:11,231 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/fd1e2eeb276e415295e07f9c004adc1d 2024-12-15T04:38:11,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237551224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237551228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237551234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237551238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237551242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/b896ef23c0f14231aa94eb3e073fae98 is 50, key is test_row_0/B:col10/1734237490831/Put/seqid=0 2024-12-15T04:38:11,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741843_1019 (size=12001) 2024-12-15T04:38:11,290 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-15T04:38:11,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:11,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237551346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237551346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,356 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237551349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,359 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237551356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237551358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,449 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,450 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-15T04:38:11,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:11,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:11,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237551557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237551557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237551560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237551563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237551564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,629 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-15T04:38:11,629 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:11,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,630 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:11,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:11,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/b896ef23c0f14231aa94eb3e073fae98 2024-12-15T04:38:11,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/90b6c7b01bf048e98fdb278de48a70b5 is 50, key is test_row_0/C:col10/1734237490831/Put/seqid=0 2024-12-15T04:38:11,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741844_1020 (size=12001) 2024-12-15T04:38:11,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/90b6c7b01bf048e98fdb278de48a70b5 2024-12-15T04:38:11,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T04:38:11,734 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/fd1e2eeb276e415295e07f9c004adc1d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fd1e2eeb276e415295e07f9c004adc1d 2024-12-15T04:38:11,746 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fd1e2eeb276e415295e07f9c004adc1d, entries=200, sequenceid=38, filesize=14.0 K 2024-12-15T04:38:11,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/b896ef23c0f14231aa94eb3e073fae98 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/b896ef23c0f14231aa94eb3e073fae98 2024-12-15T04:38:11,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/b896ef23c0f14231aa94eb3e073fae98, entries=150, sequenceid=38, filesize=11.7 K 2024-12-15T04:38:11,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/90b6c7b01bf048e98fdb278de48a70b5 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/90b6c7b01bf048e98fdb278de48a70b5 2024-12-15T04:38:11,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/90b6c7b01bf048e98fdb278de48a70b5, entries=150, sequenceid=38, filesize=11.7 K 2024-12-15T04:38:11,780 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6443b0fc7191a86cb86de2a8c7e17f47 in 584ms, sequenceid=38, compaction requested=false 2024-12-15T04:38:11,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:11,786 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,786 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-12-15T04:38:11,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:11,787 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:38:11,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:11,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:11,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:11,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:11,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:11,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:11,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/205f75d217004b1c95064f134e018b82 is 50, key is test_row_0/A:col10/1734237491237/Put/seqid=0 2024-12-15T04:38:11,812 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741845_1021 (size=12001) 2024-12-15T04:38:11,813 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/205f75d217004b1c95064f134e018b82 2024-12-15T04:38:11,817 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-15T04:38:11,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4c0ea44de593420487517b88784c9de5 is 50, key is test_row_0/B:col10/1734237491237/Put/seqid=0 2024-12-15T04:38:11,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741846_1022 (size=12001) 2024-12-15T04:38:11,859 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4c0ea44de593420487517b88784c9de5 2024-12-15T04:38:11,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:11,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
as already flushing 2024-12-15T04:38:11,896 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-15T04:38:11,897 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-15T04:38:11,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9b8a342e34fd4009ad69f6711bf453cd is 50, key is test_row_0/C:col10/1734237491237/Put/seqid=0 2024-12-15T04:38:11,899 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-15T04:38:11,899 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-15T04:38:11,901 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-15T04:38:11,901 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-15T04:38:11,901 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-15T04:38:11,901 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-15T04:38:11,903 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-12-15T04:38:11,903 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-12-15T04:38:11,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741847_1023 (size=12001) 2024-12-15T04:38:11,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237551913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,920 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237551913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237551916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,924 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9b8a342e34fd4009ad69f6711bf453cd 2024-12-15T04:38:11,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237551921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:11,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237551921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:11,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/205f75d217004b1c95064f134e018b82 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/205f75d217004b1c95064f134e018b82 2024-12-15T04:38:11,955 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/205f75d217004b1c95064f134e018b82, entries=150, sequenceid=50, filesize=11.7 K 2024-12-15T04:38:11,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4c0ea44de593420487517b88784c9de5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4c0ea44de593420487517b88784c9de5 2024-12-15T04:38:11,983 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4c0ea44de593420487517b88784c9de5, entries=150, sequenceid=50, filesize=11.7 K 2024-12-15T04:38:11,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9b8a342e34fd4009ad69f6711bf453cd as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9b8a342e34fd4009ad69f6711bf453cd 2024-12-15T04:38:12,001 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9b8a342e34fd4009ad69f6711bf453cd, entries=150, sequenceid=50, filesize=11.7 K 2024-12-15T04:38:12,004 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 6443b0fc7191a86cb86de2a8c7e17f47 in 216ms, sequenceid=50, compaction requested=true 2024-12-15T04:38:12,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:12,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:12,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-12-15T04:38:12,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-12-15T04:38:12,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-15T04:38:12,011 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3900 sec 2024-12-15T04:38:12,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.4070 sec 2024-12-15T04:38:12,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:12,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-15T04:38:12,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:12,032 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:12,033 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:12,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237552043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237552041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/789043c3a72f4ad49eab4c575e4b7fa0 is 50, key is test_row_0/A:col10/1734237491913/Put/seqid=0 2024-12-15T04:38:12,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237552044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237552047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237552052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741848_1024 (size=14341) 2024-12-15T04:38:12,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237552151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237552155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237552156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237552157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237552157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237552356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237552361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,364 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237552362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237552364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,368 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237552365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/789043c3a72f4ad49eab4c575e4b7fa0 2024-12-15T04:38:12,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c31054eefd084daca88aee8a4241a028 is 50, key is test_row_0/B:col10/1734237491913/Put/seqid=0 2024-12-15T04:38:12,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741849_1025 (size=12001) 2024-12-15T04:38:12,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c31054eefd084daca88aee8a4241a028 2024-12-15T04:38:12,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/ea65fce7bfcf49e59d3a2289b9ed6e8b is 50, key is test_row_0/C:col10/1734237491913/Put/seqid=0 2024-12-15T04:38:12,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741850_1026 (size=12001) 2024-12-15T04:38:12,666 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237552663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237552666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237552668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237552672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:12,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237552672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-15T04:38:12,732 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-12-15T04:38:12,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:12,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-12-15T04:38:12,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-15T04:38:12,739 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:12,740 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:12,741 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:12,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-15T04:38:12,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:12,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:12,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:12,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:12,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:12,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:12,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:12,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
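Note: the repeated "RegionTooBusyException: Over memstore limit=512.0 K" records above are the region server blocking writes once the region's memstore passes its blocking threshold, which is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier. As a hedged illustration only (the test's actual configuration is not shown in this log; the 128 KB flush size and multiplier of 4 below are assumptions chosen because they reproduce the 512 K limit seen here), a configuration sketch might look like:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed values for illustration only: a 128 KB flush size with a
        // block multiplier of 4 yields the 512 K blocking limit logged above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Blocking limit (bytes): " + blockingLimit); // 524288 bytes = 512 K
    }
}
```

Until a flush brings the memstore back under that limit, puts against the region are rejected with RegionTooBusyException, which is exactly the pattern of WARN/DEBUG pairs recorded above.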
2024-12-15T04:38:12,979 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/ea65fce7bfcf49e59d3a2289b9ed6e8b 2024-12-15T04:38:12,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/789043c3a72f4ad49eab4c575e4b7fa0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/789043c3a72f4ad49eab4c575e4b7fa0 2024-12-15T04:38:13,011 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/789043c3a72f4ad49eab4c575e4b7fa0, entries=200, sequenceid=77, filesize=14.0 K 2024-12-15T04:38:13,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c31054eefd084daca88aee8a4241a028 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c31054eefd084daca88aee8a4241a028 2024-12-15T04:38:13,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c31054eefd084daca88aee8a4241a028, entries=150, sequenceid=77, filesize=11.7 K 2024-12-15T04:38:13,035 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/ea65fce7bfcf49e59d3a2289b9ed6e8b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ea65fce7bfcf49e59d3a2289b9ed6e8b 2024-12-15T04:38:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-15T04:38:13,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ea65fce7bfcf49e59d3a2289b9ed6e8b, entries=150, sequenceid=77, filesize=11.7 K 2024-12-15T04:38:13,051 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:13,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:13,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,053 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
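Every one of these stack traces repeats the same region descriptor: ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', with empty STARTKEY and ENDKEY, i.e. a single region covering the whole test table. The NAME follows the usual <table>,<startKey>,<regionId>.<encodedName>. layout, where the regionId 1734237488277 is the region's creation timestamp. A small parse of that printed form (rules inferred from the name as logged, not taken from HBase code):

    // Splits a region name of the form <table>,<startKey>,<regionId>.<encodedName>.
    public class RegionNameSketch {
      public static void main(String[] args) {
        String name = "TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.";
        String[] parts = name.split(",", 3);          // table, startKey, remainder
        String table = parts[0];
        String startKey = parts[1];                   // empty => region starts at the beginning of the table
        String rest = parts[2];                       // "1734237488277.<encodedName>."
        int dot = rest.indexOf('.');
        String regionId = rest.substring(0, dot);     // creation timestamp
        String encoded = rest.substring(dot + 1, rest.length() - 1); // trailing '.' dropped
        System.out.println(table + " | startKey='" + startKey + "' | id=" + regionId + " | encoded=" + encoded);
      }
    }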
2024-12-15T04:38:13,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1025ms, sequenceid=77, compaction requested=true 2024-12-15T04:38:13,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:13,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
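The "Finished flush" summary prints each quantity twice, as a rounded human-readable value and as the exact byte count: ~161.02 KB/164880 flushed data, ~422.58 KB/432720 heap, and 46.96 KB/48090 still in the memstore. The pairs are consistent: 164880 / 1024 = 161.015625, which rounds to the printed 161.02 KB, and likewise for the other two. A tiny check of that formatting, in plain Java with no HBase classes:

    // Reproduces the "<rounded KB>/<raw bytes>" pairs printed in the flush summary.
    public class FlushSizeSketch {
      static String kb(long bytes) {
        return String.format("%.2f KB/%d", bytes / 1024.0, bytes);
      }
      public static void main(String[] args) {
        System.out.println("dataSize    ~" + kb(164880)); // 161.02 KB/164880
        System.out.println("heapSize    ~" + kb(432720)); // 422.58 KB/432720
        System.out.println("currentSize  " + kb(48090));  // 46.96 KB/48090
      }
    }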
2024-12-15T04:38:13,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:13,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:13,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:13,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:13,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:13,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:13,056 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:13,056 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:13,063 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 52684 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:13,063 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:13,065 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:13,065 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
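Both compaction threads end up selecting all four eligible store files: 52684 bytes for store A (the 11.7 K + 14.0 K + 11.7 K + 14.0 K files flushed earlier, matching the logged totalSize=51.4 K) and 48004 bytes for store B (46.9 K). The core eligibility rule behind the "Exploring compaction algorithm has selected ..." line is a ratio test: within a candidate set, no file may be larger than the sum of the others times the configured ratio (1.2 by default). A simplified sketch of that test using the A-store sizes from this log; the real policy also scores the candidate permutations, which is omitted here:

    import java.util.List;

    // Simplified "files in ratio" test from exploring compaction selection.
    public class CompactionRatioSketch {
      static boolean filesInRatio(List<Double> sizes, double ratio) {
        double total = sizes.stream().mapToDouble(Double::doubleValue).sum();
        for (double size : sizes) {
          // No single file may dwarf the rest of the candidate set.
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // KB sizes of the four A-store files logged above (totalSize=51.4 K).
        List<Double> storeA = List.of(11.7, 14.0, 11.7, 14.0);
        System.out.println("all 4 files eligible together: " + filesInRatio(storeA, 1.2)); // true
      }
    }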
2024-12-15T04:38:13,065 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/ca561e8d1c764e059f9537a5e5fae500, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fd1e2eeb276e415295e07f9c004adc1d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/205f75d217004b1c95064f134e018b82, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/789043c3a72f4ad49eab4c575e4b7fa0] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=51.4 K 2024-12-15T04:38:13,067 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca561e8d1c764e059f9537a5e5fae500, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237490665 2024-12-15T04:38:13,067 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:13,067 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,069 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd1e2eeb276e415295e07f9c004adc1d, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734237490826 2024-12-15T04:38:13,069 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c23dd2cd6e544ed884327e48a89722e4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/b896ef23c0f14231aa94eb3e073fae98, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4c0ea44de593420487517b88784c9de5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c31054eefd084daca88aee8a4241a028] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=46.9 K 2024-12-15T04:38:13,069 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 205f75d217004b1c95064f134e018b82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734237491205 2024-12-15T04:38:13,069 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c23dd2cd6e544ed884327e48a89722e4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, 
earliestPutTs=1734237490665 2024-12-15T04:38:13,071 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 789043c3a72f4ad49eab4c575e4b7fa0, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734237491913 2024-12-15T04:38:13,071 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting b896ef23c0f14231aa94eb3e073fae98, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734237490826 2024-12-15T04:38:13,072 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c0ea44de593420487517b88784c9de5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734237491205 2024-12-15T04:38:13,073 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c31054eefd084daca88aee8a4241a028, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734237491913 2024-12-15T04:38:13,121 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#12 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:13,123 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e1c54d270b704035a03946fbfb5fb0e0 is 50, key is test_row_0/B:col10/1734237491913/Put/seqid=0 2024-12-15T04:38:13,127 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#13 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:13,128 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/b4384aa798334fccaf8fb47d5ee833ce is 50, key is test_row_0/A:col10/1734237491913/Put/seqid=0 2024-12-15T04:38:13,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741852_1028 (size=12139) 2024-12-15T04:38:13,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741851_1027 (size=12139) 2024-12-15T04:38:13,162 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/b4384aa798334fccaf8fb47d5ee833ce as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b4384aa798334fccaf8fb47d5ee833ce 2024-12-15T04:38:13,165 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e1c54d270b704035a03946fbfb5fb0e0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e1c54d270b704035a03946fbfb5fb0e0 2024-12-15T04:38:13,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:13,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:13,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:13,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:13,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:13,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:13,182 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:13,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:13,192 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into b4384aa798334fccaf8fb47d5ee833ce(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
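The PressureAwareThroughputController lines report the achieved compaction write rate (0.41 MB/second for B and 0.55 MB/second for A) against the 50.00 MB/second limit; since both stayed far below the limit, the controller "slept 0 time(s)". The general shape of such a limiter is: track bytes written, compare elapsed time with how long that many bytes should take at the configured rate, and sleep the difference when writing too fast. A standalone sketch of that pattern, not the HBase class itself:

    // Minimal throughput limiter: sleep whenever writes get ahead of the configured rate.
    public class ThroughputLimiterSketch {
      private final double limitBytesPerSec;
      private final long startNanos = System.nanoTime();
      private long bytesWritten = 0;
      private int sleeps = 0;

      ThroughputLimiterSketch(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
      }

      void control(long deltaBytes) throws InterruptedException {
        bytesWritten += deltaBytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecForBytes = bytesWritten / limitBytesPerSec; // time this many bytes should take
        long sleepMs = (long) ((minSecForBytes - elapsedSec) * 1000);
        if (sleepMs > 0) {        // ahead of budget: slow down
          sleeps++;
          Thread.sleep(sleepMs);
        }
      }

      public static void main(String[] args) throws InterruptedException {
        ThroughputLimiterSketch limiter = new ThroughputLimiterSketch(50 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
          Thread.sleep(5);            // simulate slow compaction I/O between chunks
          limiter.control(64 * 1024); // 64 KB per chunk: well below 50 MB/s, so no throttling sleeps
        }
        double mbPerSec = limiter.bytesWritten / 1024.0 / 1024.0
            / ((System.nanoTime() - limiter.startNanos) / 1e9);
        System.out.printf("average throughput %.2f MB/second, slept %d time(s)%n", mbPerSec, limiter.sleeps);
      }
    }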
2024-12-15T04:38:13,192 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:13,192 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=12, startTime=1734237493055; duration=0sec 2024-12-15T04:38:13,193 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:13,193 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:13,193 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:13,194 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into e1c54d270b704035a03946fbfb5fb0e0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:13,194 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:13,194 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=12, startTime=1734237493056; duration=0sec 2024-12-15T04:38:13,194 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:13,194 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:13,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/e980cff43fd8468ca170639eb73d3882 is 50, key is test_row_0/A:col10/1734237493179/Put/seqid=0 2024-12-15T04:38:13,199 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:13,199 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:13,199 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
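Every flush and compaction output above is first written under the region's .tmp directory (e.g. .tmp/A/b4384aa798334fccaf8fb47d5ee833ce) and only then "committed", i.e. moved into the store directory (A/b4384aa798334fccaf8fb47d5ee833ce), so readers never observe a partially written HFile. The four A files totalling 51.4 K and the four B files totalling 46.9 K each compact down to a single ~11.9 K file, which is consistent with the test repeatedly overwriting the same small set of rows so that most older cells are superseded. The same write-to-temp-then-rename idea, shown on a local filesystem for illustration only (paths made up; HDFS and the HFile format are out of scope):

    import java.io.IOException;
    import java.nio.file.*;

    // Write-to-temp-then-move commit, mirroring the ".tmp/A/<file>" -> "A/<file>" pattern in the log.
    public class CommitSketch {
      public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("region");
        Path tmpFile = region.resolve(".tmp/A/b4384aa798334fccaf8fb47d5ee833ce");
        Path storeFile = region.resolve("A/b4384aa798334fccaf8fb47d5ee833ce");

        Files.createDirectories(tmpFile.getParent());
        Files.write(tmpFile, "hfile contents".getBytes());   // 1. write the full file off to the side

        Files.createDirectories(storeFile.getParent());
        Files.move(tmpFile, storeFile, StandardCopyOption.ATOMIC_MOVE); // 2. publish it with a single rename

        System.out.println("committed " + storeFile);
      }
    }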
2024-12-15T04:38:13,199 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/11c541a1a63c41d79e75aae1ab212f71, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/90b6c7b01bf048e98fdb278de48a70b5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9b8a342e34fd4009ad69f6711bf453cd, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ea65fce7bfcf49e59d3a2289b9ed6e8b] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=46.9 K 2024-12-15T04:38:13,201 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11c541a1a63c41d79e75aae1ab212f71, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237490665 2024-12-15T04:38:13,202 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90b6c7b01bf048e98fdb278de48a70b5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734237490826 2024-12-15T04:38:13,204 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9b8a342e34fd4009ad69f6711bf453cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1734237491205 2024-12-15T04:38:13,206 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:13,207 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea65fce7bfcf49e59d3a2289b9ed6e8b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734237491913 2024-12-15T04:38:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:13,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741853_1029 (size=14341) 2024-12-15T04:38:13,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/e980cff43fd8468ca170639eb73d3882 2024-12-15T04:38:13,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/7fa0aa66aa014304ad0cff6ecc5bce1d is 50, key is test_row_0/B:col10/1734237493179/Put/seqid=0 2024-12-15T04:38:13,256 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#15 average throughput is 0.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:13,257 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/15fdac4b69df4f29b53f3f9dd4156fa6 is 50, key is test_row_0/C:col10/1734237491913/Put/seqid=0 2024-12-15T04:38:13,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741854_1030 (size=12001) 2024-12-15T04:38:13,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/7fa0aa66aa014304ad0cff6ecc5bce1d 2024-12-15T04:38:13,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237553271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237553273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237553269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741855_1031 (size=12139) 2024-12-15T04:38:13,295 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237553282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,296 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237553290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,305 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/15fdac4b69df4f29b53f3f9dd4156fa6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/15fdac4b69df4f29b53f3f9dd4156fa6 2024-12-15T04:38:13,309 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/94c8543480a1468085434c6247f758d1 is 50, key is test_row_0/C:col10/1734237493179/Put/seqid=0 2024-12-15T04:38:13,318 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into 15fdac4b69df4f29b53f3f9dd4156fa6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
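The RegionTooBusyException warnings are back-pressure rather than failures: checkResources rejects incoming mutations once the region's memstore exceeds its blocking limit (512.0 K here, deliberately tiny for this test), so clients must back off and retry while MemStoreFlusher drains the region. The check itself is simple; a self-contained sketch of the pattern, with placeholder class and exception names rather than the HBase types:

    // Back-pressure check: refuse writes while the in-memory buffer is over its blocking limit.
    public class MemstoreLimitSketch {
      static class RegionTooBusy extends RuntimeException {
        RegionTooBusy(String msg) { super(msg); }
      }

      static final long BLOCKING_LIMIT = 512 * 1024; // 512.0 K, as in the log
      static long memstoreSize = 0;

      static void put(byte[] cell) {
        if (memstoreSize > BLOCKING_LIMIT) {
          // Ask for a flush and push the work back to the client instead of buffering more.
          requestFlush();
          throw new RegionTooBusy("Over memstore limit=512.0 K");
        }
        memstoreSize += cell.length;
      }

      static void requestFlush() { /* would hand the region to the flusher thread */ }

      public static void main(String[] args) {
        try {
          for (int i = 0; i < 200_000; i++) {
            put(new byte[50]); // ~50-byte cells, like the test rows
          }
        } catch (RegionTooBusy e) {
          System.out.println("client must retry later: " + e.getMessage());
        }
      }
    }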
2024-12-15T04:38:13,318 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:13,318 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=12, startTime=1734237493056; duration=0sec 2024-12-15T04:38:13,319 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:13,319 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:13,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741856_1032 (size=12001) 2024-12-15T04:38:13,324 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/94c8543480a1468085434c6247f758d1 2024-12-15T04:38:13,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/e980cff43fd8468ca170639eb73d3882 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e980cff43fd8468ca170639eb73d3882 2024-12-15T04:38:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-15T04:38:13,356 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e980cff43fd8468ca170639eb73d3882, entries=200, sequenceid=88, filesize=14.0 K 2024-12-15T04:38:13,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/7fa0aa66aa014304ad0cff6ecc5bce1d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7fa0aa66aa014304ad0cff6ecc5bce1d 2024-12-15T04:38:13,360 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:13,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:13,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:13,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,362 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:13,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7fa0aa66aa014304ad0cff6ecc5bce1d, entries=150, sequenceid=88, filesize=11.7 K 2024-12-15T04:38:13,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/94c8543480a1468085434c6247f758d1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/94c8543480a1468085434c6247f758d1 2024-12-15T04:38:13,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237553385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237553386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,394 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237553392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/94c8543480a1468085434c6247f758d1, entries=150, sequenceid=88, filesize=11.7 K 2024-12-15T04:38:13,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 6443b0fc7191a86cb86de2a8c7e17f47 in 221ms, sequenceid=88, compaction requested=false 2024-12-15T04:38:13,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:13,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:13,405 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-15T04:38:13,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:13,405 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:13,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:13,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:13,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:13,406 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:13,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/772c13b6a93d4b1682a9b1acd358f435 is 50, key is test_row_0/A:col10/1734237493278/Put/seqid=0 2024-12-15T04:38:13,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741857_1033 (size=12001) 2024-12-15T04:38:13,432 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237553423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237553432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,515 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:13,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:13,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,516 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:13,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,536 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237553534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237553538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237553590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,593 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237553591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237553597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,670 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:13,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:13,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:13,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237553740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237553746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,825 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,825 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:13,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:13,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:13,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,832 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/772c13b6a93d4b1682a9b1acd358f435 2024-12-15T04:38:13,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-15T04:38:13,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a3bf88afbaaf49ef95da48667c661ed6 is 50, key is test_row_0/B:col10/1734237493278/Put/seqid=0 2024-12-15T04:38:13,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741858_1034 (size=12001) 2024-12-15T04:38:13,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a3bf88afbaaf49ef95da48667c661ed6 2024-12-15T04:38:13,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237553897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237553898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/7c6b1b10806c42c6a37299fed6b10240 is 50, key is test_row_0/C:col10/1734237493278/Put/seqid=0 2024-12-15T04:38:13,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:13,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237553904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741859_1035 (size=12001) 2024-12-15T04:38:13,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/7c6b1b10806c42c6a37299fed6b10240 2024-12-15T04:38:13,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/772c13b6a93d4b1682a9b1acd358f435 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/772c13b6a93d4b1682a9b1acd358f435 2024-12-15T04:38:13,952 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/772c13b6a93d4b1682a9b1acd358f435, entries=150, sequenceid=118, filesize=11.7 K 2024-12-15T04:38:13,955 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a3bf88afbaaf49ef95da48667c661ed6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a3bf88afbaaf49ef95da48667c661ed6 2024-12-15T04:38:13,966 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a3bf88afbaaf49ef95da48667c661ed6, entries=150, sequenceid=118, filesize=11.7 K 2024-12-15T04:38:13,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/7c6b1b10806c42c6a37299fed6b10240 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7c6b1b10806c42c6a37299fed6b10240 2024-12-15T04:38:13,979 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:13,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:13,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:13,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:13,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:13,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7c6b1b10806c42c6a37299fed6b10240, entries=150, sequenceid=118, filesize=11.7 K 2024-12-15T04:38:13,989 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 6443b0fc7191a86cb86de2a8c7e17f47 in 583ms, sequenceid=118, compaction requested=true 2024-12-15T04:38:13,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:13,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:13,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:13,989 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:13,989 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:13,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:13,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:13,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:13,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:13,991 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:13,991 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:13,991 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:13,992 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e1c54d270b704035a03946fbfb5fb0e0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7fa0aa66aa014304ad0cff6ecc5bce1d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a3bf88afbaaf49ef95da48667c661ed6] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=35.3 K 2024-12-15T04:38:13,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:13,994 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:13,994 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:13,994 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b4384aa798334fccaf8fb47d5ee833ce, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e980cff43fd8468ca170639eb73d3882, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/772c13b6a93d4b1682a9b1acd358f435] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=37.6 K 2024-12-15T04:38:13,994 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e1c54d270b704035a03946fbfb5fb0e0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734237491913 2024-12-15T04:38:13,996 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4384aa798334fccaf8fb47d5ee833ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734237491913 2024-12-15T04:38:13,996 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7fa0aa66aa014304ad0cff6ecc5bce1d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1734237492041 2024-12-15T04:38:13,997 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting e980cff43fd8468ca170639eb73d3882, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1734237492041 2024-12-15T04:38:13,997 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] 
compactions.Compactor(224): Compacting a3bf88afbaaf49ef95da48667c661ed6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237493270 2024-12-15T04:38:14,000 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 772c13b6a93d4b1682a9b1acd358f435, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237493270 2024-12-15T04:38:14,022 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:14,023 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/2d1656dd8af54a0e8bffdff913eccb59 is 50, key is test_row_0/B:col10/1734237493278/Put/seqid=0 2024-12-15T04:38:14,029 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#22 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:14,030 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/cf85e948db6340eb89be499dfdd41b36 is 50, key is test_row_0/A:col10/1734237493278/Put/seqid=0 2024-12-15T04:38:14,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741861_1037 (size=12241) 2024-12-15T04:38:14,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741860_1036 (size=12241) 2024-12-15T04:38:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:14,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:14,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:14,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:14,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:14,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:14,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:14,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:14,064 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/cf85e948db6340eb89be499dfdd41b36 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/cf85e948db6340eb89be499dfdd41b36 2024-12-15T04:38:14,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9e9babc9515c45abb5a5c5b5da551edc is 50, key is test_row_0/A:col10/1734237494053/Put/seqid=0 2024-12-15T04:38:14,071 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/2d1656dd8af54a0e8bffdff913eccb59 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2d1656dd8af54a0e8bffdff913eccb59 2024-12-15T04:38:14,084 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into 2d1656dd8af54a0e8bffdff913eccb59(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:14,085 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:14,085 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=13, startTime=1734237493989; duration=0sec 2024-12-15T04:38:14,085 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into cf85e948db6340eb89be499dfdd41b36(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:14,085 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:14,085 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:14,085 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=13, startTime=1734237493989; duration=0sec 2024-12-15T04:38:14,085 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:14,085 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:14,085 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:14,085 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:14,087 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:14,088 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:14,088 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:14,089 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/15fdac4b69df4f29b53f3f9dd4156fa6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/94c8543480a1468085434c6247f758d1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7c6b1b10806c42c6a37299fed6b10240] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=35.3 K 2024-12-15T04:38:14,089 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 15fdac4b69df4f29b53f3f9dd4156fa6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=77, earliestPutTs=1734237491913 2024-12-15T04:38:14,091 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 94c8543480a1468085434c6247f758d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1734237492041 2024-12-15T04:38:14,091 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c6b1b10806c42c6a37299fed6b10240, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237493270 2024-12-15T04:38:14,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741862_1038 (size=14391) 2024-12-15T04:38:14,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9e9babc9515c45abb5a5c5b5da551edc 2024-12-15T04:38:14,111 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#24 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:14,112 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/ec4f745b9a5e4071b5969e05bd149166 is 50, key is test_row_0/C:col10/1734237493278/Put/seqid=0 2024-12-15T04:38:14,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237554126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237554129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:14,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:14,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:14,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:14,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:14,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:14,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:14,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a7fb5a8637ae42b1be1dc256d533c383 is 50, key is test_row_0/B:col10/1734237494053/Put/seqid=0 2024-12-15T04:38:14,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741864_1040 (size=12051) 2024-12-15T04:38:14,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a7fb5a8637ae42b1be1dc256d533c383 2024-12-15T04:38:14,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741863_1039 (size=12241) 2024-12-15T04:38:14,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/067918eec2f4425d8c6d4047d721e827 is 50, key is test_row_0/C:col10/1734237494053/Put/seqid=0 2024-12-15T04:38:14,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741865_1041 (size=12051) 2024-12-15T04:38:14,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/067918eec2f4425d8c6d4047d721e827 2024-12-15T04:38:14,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237554236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237554236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9e9babc9515c45abb5a5c5b5da551edc as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9e9babc9515c45abb5a5c5b5da551edc 2024-12-15T04:38:14,248 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9e9babc9515c45abb5a5c5b5da551edc, entries=200, sequenceid=129, filesize=14.1 K 2024-12-15T04:38:14,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a7fb5a8637ae42b1be1dc256d533c383 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a7fb5a8637ae42b1be1dc256d533c383 2024-12-15T04:38:14,261 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a7fb5a8637ae42b1be1dc256d533c383, entries=150, sequenceid=129, filesize=11.8 K 2024-12-15T04:38:14,264 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/067918eec2f4425d8c6d4047d721e827 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/067918eec2f4425d8c6d4047d721e827 2024-12-15T04:38:14,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/067918eec2f4425d8c6d4047d721e827, entries=150, sequenceid=129, filesize=11.8 K 2024-12-15T04:38:14,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 
KB/151140 for 6443b0fc7191a86cb86de2a8c7e17f47 in 222ms, sequenceid=129, compaction requested=false 2024-12-15T04:38:14,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:14,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-12-15T04:38:14,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:14,289 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:38:14,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:14,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:14,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:14,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:14,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:14,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:14,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/30a1300b078c4f75aef4d4179507b1d1 is 50, key is test_row_0/A:col10/1734237494127/Put/seqid=0 2024-12-15T04:38:14,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741866_1042 (size=12151) 2024-12-15T04:38:14,340 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/30a1300b078c4f75aef4d4179507b1d1 2024-12-15T04:38:14,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/3c94270c922144999d0fbcf206e8bbce is 50, key is test_row_0/B:col10/1734237494127/Put/seqid=0 2024-12-15T04:38:14,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741867_1043 (size=12151) 2024-12-15T04:38:14,383 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/3c94270c922144999d0fbcf206e8bbce 2024-12-15T04:38:14,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/a24ff2cf0d324b20a34e0c11d684415f is 50, key is test_row_0/C:col10/1734237494127/Put/seqid=0 2024-12-15T04:38:14,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:14,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:14,422 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237554420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237554422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237554422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741868_1044 (size=12151) 2024-12-15T04:38:14,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237554441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237554443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237554525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237554527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237554528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,578 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/ec4f745b9a5e4071b5969e05bd149166 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ec4f745b9a5e4071b5969e05bd149166 2024-12-15T04:38:14,592 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into ec4f745b9a5e4071b5969e05bd149166(size=12.0 K), total size for store is 23.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:14,593 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:14,593 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=13, startTime=1734237493990; duration=0sec 2024-12-15T04:38:14,593 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:14,593 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:14,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237554733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237554740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237554740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237554747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:14,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237554749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:14,835 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/a24ff2cf0d324b20a34e0c11d684415f 2024-12-15T04:38:14,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-15T04:38:14,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/30a1300b078c4f75aef4d4179507b1d1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/30a1300b078c4f75aef4d4179507b1d1 2024-12-15T04:38:14,866 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/30a1300b078c4f75aef4d4179507b1d1, entries=150, sequenceid=156, filesize=11.9 K 2024-12-15T04:38:14,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/3c94270c922144999d0fbcf206e8bbce as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/3c94270c922144999d0fbcf206e8bbce 2024-12-15T04:38:14,887 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/3c94270c922144999d0fbcf206e8bbce, entries=150, sequenceid=156, filesize=11.9 K 2024-12-15T04:38:14,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/a24ff2cf0d324b20a34e0c11d684415f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a24ff2cf0d324b20a34e0c11d684415f 2024-12-15T04:38:14,902 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a24ff2cf0d324b20a34e0c11d684415f, entries=150, sequenceid=156, filesize=11.9 K 2024-12-15T04:38:14,904 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 6443b0fc7191a86cb86de2a8c7e17f47 in 614ms, sequenceid=156, compaction requested=true 2024-12-15T04:38:14,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:14,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:14,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-12-15T04:38:14,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-12-15T04:38:14,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-12-15T04:38:14,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1660 sec 2024-12-15T04:38:14,911 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 2.1740 sec 2024-12-15T04:38:15,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-15T04:38:15,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:15,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:15,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:15,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,047 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:15,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,056 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9804398af3c84c4fb673d0a0fab071ce is 50, key is test_row_0/A:col10/1734237495045/Put/seqid=0 2024-12-15T04:38:15,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741869_1045 (size=12151) 2024-12-15T04:38:15,080 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9804398af3c84c4fb673d0a0fab071ce 2024-12-15T04:38:15,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/cb9da5981c904e5492c6625b9601321b is 50, key is test_row_0/B:col10/1734237495045/Put/seqid=0 2024-12-15T04:38:15,099 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237555095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237555098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237555099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741870_1046 (size=12151) 2024-12-15T04:38:15,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/cb9da5981c904e5492c6625b9601321b 2024-12-15T04:38:15,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/1959a4b6f9774878b5889b5716426007 is 50, key is test_row_0/C:col10/1734237495045/Put/seqid=0 2024-12-15T04:38:15,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741871_1047 (size=12151) 2024-12-15T04:38:15,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/1959a4b6f9774878b5889b5716426007 2024-12-15T04:38:15,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9804398af3c84c4fb673d0a0fab071ce as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9804398af3c84c4fb673d0a0fab071ce 2024-12-15T04:38:15,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9804398af3c84c4fb673d0a0fab071ce, entries=150, sequenceid=171, filesize=11.9 K 2024-12-15T04:38:15,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/cb9da5981c904e5492c6625b9601321b as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/cb9da5981c904e5492c6625b9601321b 2024-12-15T04:38:15,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/cb9da5981c904e5492c6625b9601321b, entries=150, sequenceid=171, filesize=11.9 K 2024-12-15T04:38:15,193 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/1959a4b6f9774878b5889b5716426007 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/1959a4b6f9774878b5889b5716426007 2024-12-15T04:38:15,204 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/1959a4b6f9774878b5889b5716426007, entries=150, sequenceid=171, filesize=11.9 K 2024-12-15T04:38:15,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237555201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 6443b0fc7191a86cb86de2a8c7e17f47 in 160ms, sequenceid=171, compaction requested=true 2024-12-15T04:38:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:15,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:15,207 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:15,207 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:15,207 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:15,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:15,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:15,211 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50934 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:15,211 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:15,211 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:15,211 DEBUG 
[RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:15,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:15,211 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:15,211 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:15,211 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:15,211 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2d1656dd8af54a0e8bffdff913eccb59, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a7fb5a8637ae42b1be1dc256d533c383, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/3c94270c922144999d0fbcf206e8bbce, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/cb9da5981c904e5492c6625b9601321b] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=47.5 K 2024-12-15T04:38:15,211 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/cf85e948db6340eb89be499dfdd41b36, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9e9babc9515c45abb5a5c5b5da551edc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/30a1300b078c4f75aef4d4179507b1d1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9804398af3c84c4fb673d0a0fab071ce] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=49.7 K 2024-12-15T04:38:15,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:38:15,213 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d1656dd8af54a0e8bffdff913eccb59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, 
seqNum=118, earliestPutTs=1734237493270 2024-12-15T04:38:15,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:15,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,214 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf85e948db6340eb89be499dfdd41b36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237493270 2024-12-15T04:38:15,215 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a7fb5a8637ae42b1be1dc256d533c383, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237493414 2024-12-15T04:38:15,216 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e9babc9515c45abb5a5c5b5da551edc, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237493414 2024-12-15T04:38:15,217 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c94270c922144999d0fbcf206e8bbce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734237494106 2024-12-15T04:38:15,217 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30a1300b078c4f75aef4d4179507b1d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734237494106 2024-12-15T04:38:15,218 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting cb9da5981c904e5492c6625b9601321b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237494420 2024-12-15T04:38:15,218 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9804398af3c84c4fb673d0a0fab071ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237494420 2024-12-15T04:38:15,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/6a63db6375324632b788ba68b136872f is 50, key is test_row_0/A:col10/1734237495210/Put/seqid=0 2024-12-15T04:38:15,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237555233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237555235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237555255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741872_1048 (size=12151) 2024-12-15T04:38:15,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/6a63db6375324632b788ba68b136872f 2024-12-15T04:38:15,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237555260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,272 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#34 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:15,273 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/149af2119d0c4937ad9458c00092494f is 50, key is test_row_0/A:col10/1734237495045/Put/seqid=0 2024-12-15T04:38:15,285 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#35 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:15,286 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/32b223adf2af450b9e3b06d1ef466618 is 50, key is test_row_0/B:col10/1734237495210/Put/seqid=0 2024-12-15T04:38:15,286 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e3752d5b939244baa78a56807d567f74 is 50, key is test_row_0/B:col10/1734237495045/Put/seqid=0 2024-12-15T04:38:15,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741874_1050 (size=12527) 2024-12-15T04:38:15,325 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e3752d5b939244baa78a56807d567f74 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e3752d5b939244baa78a56807d567f74 2024-12-15T04:38:15,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741875_1051 (size=12151) 2024-12-15T04:38:15,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741873_1049 (size=12527) 2024-12-15T04:38:15,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237555337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237555338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,345 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into e3752d5b939244baa78a56807d567f74(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:15,345 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:15,345 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=12, startTime=1734237495207; duration=0sec 2024-12-15T04:38:15,345 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:15,345 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:15,345 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:15,348 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/149af2119d0c4937ad9458c00092494f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/149af2119d0c4937ad9458c00092494f 2024-12-15T04:38:15,349 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48594 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:15,350 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:15,350 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:15,350 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ec4f745b9a5e4071b5969e05bd149166, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/067918eec2f4425d8c6d4047d721e827, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a24ff2cf0d324b20a34e0c11d684415f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/1959a4b6f9774878b5889b5716426007] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=47.5 K 2024-12-15T04:38:15,351 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ec4f745b9a5e4071b5969e05bd149166, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237493270 2024-12-15T04:38:15,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 067918eec2f4425d8c6d4047d721e827, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237493414 2024-12-15T04:38:15,357 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a24ff2cf0d324b20a34e0c11d684415f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734237494106 2024-12-15T04:38:15,358 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1959a4b6f9774878b5889b5716426007, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237494420 2024-12-15T04:38:15,359 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into 149af2119d0c4937ad9458c00092494f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:15,361 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:15,361 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=12, startTime=1734237495207; duration=0sec 2024-12-15T04:38:15,362 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:15,362 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:15,375 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#37 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:15,376 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/a5ce8476c9e8447f90faf66a9729a5e8 is 50, key is test_row_0/C:col10/1734237495045/Put/seqid=0 2024-12-15T04:38:15,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741876_1052 (size=12527) 2024-12-15T04:38:15,405 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/a5ce8476c9e8447f90faf66a9729a5e8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a5ce8476c9e8447f90faf66a9729a5e8 2024-12-15T04:38:15,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237555407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,417 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into a5ce8476c9e8447f90faf66a9729a5e8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:15,417 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:15,417 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=12, startTime=1734237495211; duration=0sec 2024-12-15T04:38:15,418 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:15,418 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:15,541 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237555541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237555542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237555712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,728 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/32b223adf2af450b9e3b06d1ef466618 2024-12-15T04:38:15,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/091f586ce84a4b54a36949b7ac6939db is 50, key is test_row_0/C:col10/1734237495210/Put/seqid=0 2024-12-15T04:38:15,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741877_1053 (size=12151) 2024-12-15T04:38:15,763 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/091f586ce84a4b54a36949b7ac6939db 2024-12-15T04:38:15,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/6a63db6375324632b788ba68b136872f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6a63db6375324632b788ba68b136872f 2024-12-15T04:38:15,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6a63db6375324632b788ba68b136872f, entries=150, sequenceid=197, filesize=11.9 K 2024-12-15T04:38:15,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/32b223adf2af450b9e3b06d1ef466618 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/32b223adf2af450b9e3b06d1ef466618 2024-12-15T04:38:15,797 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/32b223adf2af450b9e3b06d1ef466618, entries=150, sequenceid=197, filesize=11.9 K 2024-12-15T04:38:15,798 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/091f586ce84a4b54a36949b7ac6939db as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/091f586ce84a4b54a36949b7ac6939db 2024-12-15T04:38:15,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/091f586ce84a4b54a36949b7ac6939db, entries=150, sequenceid=197, filesize=11.9 K 2024-12-15T04:38:15,810 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6443b0fc7191a86cb86de2a8c7e17f47 in 599ms, sequenceid=197, compaction requested=false 2024-12-15T04:38:15,810 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:15,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:15,846 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:15,847 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:15,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/52018197caba41c1b9c4198dbb27a650 is 50, key is test_row_0/A:col10/1734237495845/Put/seqid=0 2024-12-15T04:38:15,861 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741878_1054 (size=12147) 2024-12-15T04:38:15,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/52018197caba41c1b9c4198dbb27a650 2024-12-15T04:38:15,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/8be34ce6dc05476e9ff3d757a7d7d34c is 50, key is test_row_0/B:col10/1734237495845/Put/seqid=0 2024-12-15T04:38:15,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741879_1055 (size=9757) 2024-12-15T04:38:15,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/8be34ce6dc05476e9ff3d757a7d7d34c 2024-12-15T04:38:15,903 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237555897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:15,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237555899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:15,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6b7beb493bc5466aaa6bcdd63c4e9ac6 is 50, key is test_row_0/C:col10/1734237495845/Put/seqid=0 2024-12-15T04:38:15,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741880_1056 (size=9757) 2024-12-15T04:38:16,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237556005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,011 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237556007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237556212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237556215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237556215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237556260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237556271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,334 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=212 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6b7beb493bc5466aaa6bcdd63c4e9ac6 2024-12-15T04:38:16,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/52018197caba41c1b9c4198dbb27a650 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/52018197caba41c1b9c4198dbb27a650 2024-12-15T04:38:16,349 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/52018197caba41c1b9c4198dbb27a650, entries=150, sequenceid=212, filesize=11.9 K 2024-12-15T04:38:16,350 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/8be34ce6dc05476e9ff3d757a7d7d34c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/8be34ce6dc05476e9ff3d757a7d7d34c 2024-12-15T04:38:16,365 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/8be34ce6dc05476e9ff3d757a7d7d34c, entries=100, sequenceid=212, filesize=9.5 K 2024-12-15T04:38:16,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6b7beb493bc5466aaa6bcdd63c4e9ac6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6b7beb493bc5466aaa6bcdd63c4e9ac6 2024-12-15T04:38:16,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6b7beb493bc5466aaa6bcdd63c4e9ac6, entries=100, sequenceid=212, filesize=9.5 K 2024-12-15T04:38:16,378 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6443b0fc7191a86cb86de2a8c7e17f47 in 532ms, sequenceid=212, compaction requested=true 2024-12-15T04:38:16,378 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:16,378 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:16,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:16,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:16,379 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:16,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:16,380 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36825 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:16,380 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34435 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:16,380 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:16,380 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:16,380 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:16,380 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:16,380 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/149af2119d0c4937ad9458c00092494f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6a63db6375324632b788ba68b136872f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/52018197caba41c1b9c4198dbb27a650] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.0 K 2024-12-15T04:38:16,380 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e3752d5b939244baa78a56807d567f74, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/32b223adf2af450b9e3b06d1ef466618, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/8be34ce6dc05476e9ff3d757a7d7d34c] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=33.6 K 2024-12-15T04:38:16,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:16,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:16,381 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:16,381 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 149af2119d0c4937ad9458c00092494f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237494420 2024-12-15T04:38:16,381 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e3752d5b939244baa78a56807d567f74, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237494420 2024-12-15T04:38:16,382 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a63db6375324632b788ba68b136872f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237495096 2024-12-15T04:38:16,382 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 32b223adf2af450b9e3b06d1ef466618, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237495096 2024-12-15T04:38:16,382 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 
8be34ce6dc05476e9ff3d757a7d7d34c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734237495219 2024-12-15T04:38:16,382 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52018197caba41c1b9c4198dbb27a650, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734237495219 2024-12-15T04:38:16,401 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#42 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:16,402 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/0fdf702de5b347429651e45bf6146446 is 50, key is test_row_0/A:col10/1734237495845/Put/seqid=0 2024-12-15T04:38:16,405 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:16,406 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e504f21b109c461aaaba8324622c5499 is 50, key is test_row_0/B:col10/1734237495845/Put/seqid=0 2024-12-15T04:38:16,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741881_1057 (size=12629) 2024-12-15T04:38:16,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741882_1058 (size=12629) 2024-12-15T04:38:16,450 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e504f21b109c461aaaba8324622c5499 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e504f21b109c461aaaba8324622c5499 2024-12-15T04:38:16,486 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into e504f21b109c461aaaba8324622c5499(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:16,487 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:16,487 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=13, startTime=1734237496379; duration=0sec 2024-12-15T04:38:16,487 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:16,487 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:16,487 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:16,489 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34435 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:16,489 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:16,489 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:16,489 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a5ce8476c9e8447f90faf66a9729a5e8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/091f586ce84a4b54a36949b7ac6939db, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6b7beb493bc5466aaa6bcdd63c4e9ac6] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=33.6 K 2024-12-15T04:38:16,490 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a5ce8476c9e8447f90faf66a9729a5e8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237494420 2024-12-15T04:38:16,490 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 091f586ce84a4b54a36949b7ac6939db, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237495096 2024-12-15T04:38:16,492 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b7beb493bc5466aaa6bcdd63c4e9ac6, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734237495219 2024-12-15T04:38:16,513 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#44 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:16,514 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/b88127cd3e194d39bc32567f2e436a2b is 50, key is test_row_0/C:col10/1734237495845/Put/seqid=0 2024-12-15T04:38:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:16,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:38:16,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:16,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:16,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:16,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:16,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:16,519 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:16,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/655f6b0a7c224bc6bc468a228c13589a is 50, key is test_row_0/A:col10/1734237496516/Put/seqid=0 2024-12-15T04:38:16,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237556537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,539 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237556537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741883_1059 (size=12629) 2024-12-15T04:38:16,559 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/b88127cd3e194d39bc32567f2e436a2b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/b88127cd3e194d39bc32567f2e436a2b 2024-12-15T04:38:16,572 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into b88127cd3e194d39bc32567f2e436a2b(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:16,572 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:16,572 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=13, startTime=1734237496381; duration=0sec 2024-12-15T04:38:16,572 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:16,572 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:16,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741884_1060 (size=14541) 2024-12-15T04:38:16,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237556641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,642 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237556642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,830 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/0fdf702de5b347429651e45bf6146446 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/0fdf702de5b347429651e45bf6146446 2024-12-15T04:38:16,837 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into 0fdf702de5b347429651e45bf6146446(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:16,837 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:16,837 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=13, startTime=1734237496378; duration=0sec 2024-12-15T04:38:16,837 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:16,837 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:16,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:16,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237556843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237556844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:16,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-12-15T04:38:16,846 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-12-15T04:38:16,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:16,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-12-15T04:38:16,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-15T04:38:16,850 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:16,851 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:16,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:16,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-15T04:38:16,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/655f6b0a7c224bc6bc468a228c13589a 2024-12-15T04:38:16,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f8160a3872d7427182507fd059c7ec13 is 50, key is test_row_0/B:col10/1734237496516/Put/seqid=0 2024-12-15T04:38:16,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741885_1061 (size=12151) 2024-12-15T04:38:17,004 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-15T04:38:17,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:17,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:17,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:17,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,148 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:17,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:17,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237557147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237557148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-15T04:38:17,157 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-15T04:38:17,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:17,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:17,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:17,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:17,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237557219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,311 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-15T04:38:17,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:17,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:17,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:17,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:17,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f8160a3872d7427182507fd059c7ec13 2024-12-15T04:38:17,410 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/fd65579037744fd5b26da1a50f75e059 is 50, key is test_row_0/C:col10/1734237496516/Put/seqid=0 2024-12-15T04:38:17,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741886_1062 (size=12151) 2024-12-15T04:38:17,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/fd65579037744fd5b26da1a50f75e059 2024-12-15T04:38:17,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/655f6b0a7c224bc6bc468a228c13589a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/655f6b0a7c224bc6bc468a228c13589a 2024-12-15T04:38:17,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/655f6b0a7c224bc6bc468a228c13589a, entries=200, sequenceid=238, filesize=14.2 K 2024-12-15T04:38:17,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f8160a3872d7427182507fd059c7ec13 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f8160a3872d7427182507fd059c7ec13 2024-12-15T04:38:17,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-15T04:38:17,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f8160a3872d7427182507fd059c7ec13, entries=150, sequenceid=238, filesize=11.9 K 2024-12-15T04:38:17,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/fd65579037744fd5b26da1a50f75e059 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/fd65579037744fd5b26da1a50f75e059 2024-12-15T04:38:17,465 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-15T04:38:17,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:17,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:17,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:17,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:17,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:17,473 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/fd65579037744fd5b26da1a50f75e059, entries=150, sequenceid=238, filesize=11.9 K 2024-12-15T04:38:17,474 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6443b0fc7191a86cb86de2a8c7e17f47 in 956ms, sequenceid=238, compaction requested=false 2024-12-15T04:38:17,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:17,620 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-12-15T04:38:17,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:17,621 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:17,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:17,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:17,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:17,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:17,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:17,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:17,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/94ff6d0cdfa54b03922c6f972dc2f43f is 50, key is test_row_0/A:col10/1734237496527/Put/seqid=0 2024-12-15T04:38:17,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741887_1063 (size=12151) 2024-12-15T04:38:17,654 DEBUG [MemStoreFlusher.0 
{}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:17,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:17,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:17,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237557699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:17,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237557700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237557802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:17,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237557806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:17,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-15T04:38:18,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237558005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237558010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,042 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/94ff6d0cdfa54b03922c6f972dc2f43f 2024-12-15T04:38:18,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/9f2b0b387a134143bcc26c9c7fd54e86 is 50, key is test_row_0/B:col10/1734237496527/Put/seqid=0 2024-12-15T04:38:18,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741888_1064 (size=12151) 2024-12-15T04:38:18,281 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237558281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,282 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:18,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237558284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,285 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:18,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237558308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237558312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,482 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/9f2b0b387a134143bcc26c9c7fd54e86 2024-12-15T04:38:18,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9406626a5b624ed18decaecaaae66562 is 50, key is test_row_0/C:col10/1734237496527/Put/seqid=0 2024-12-15T04:38:18,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741889_1065 (size=12151) 2024-12-15T04:38:18,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237558812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:18,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237558814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:18,922 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9406626a5b624ed18decaecaaae66562 2024-12-15T04:38:18,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/94ff6d0cdfa54b03922c6f972dc2f43f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/94ff6d0cdfa54b03922c6f972dc2f43f 2024-12-15T04:38:18,935 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/94ff6d0cdfa54b03922c6f972dc2f43f, entries=150, sequenceid=251, filesize=11.9 K 2024-12-15T04:38:18,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/9f2b0b387a134143bcc26c9c7fd54e86 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9f2b0b387a134143bcc26c9c7fd54e86 2024-12-15T04:38:18,945 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9f2b0b387a134143bcc26c9c7fd54e86, entries=150, sequenceid=251, filesize=11.9 K 2024-12-15T04:38:18,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9406626a5b624ed18decaecaaae66562 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9406626a5b624ed18decaecaaae66562 2024-12-15T04:38:18,953 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9406626a5b624ed18decaecaaae66562, entries=150, sequenceid=251, filesize=11.9 K 2024-12-15T04:38:18,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-15T04:38:18,956 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1335ms, sequenceid=251, compaction requested=true 2024-12-15T04:38:18,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:18,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:18,956 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-12-15T04:38:18,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-12-15T04:38:18,960 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-12-15T04:38:18,961 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1070 sec 2024-12-15T04:38:18,965 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 2.1150 sec 2024-12-15T04:38:19,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:19,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-15T04:38:19,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:19,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:19,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:19,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-12-15T04:38:19,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:19,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:19,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/5cf74a1384684e4e8f1af1b64b15442a is 50, key is test_row_0/A:col10/1734237499231/Put/seqid=0 2024-12-15T04:38:19,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741890_1066 (size=12301) 2024-12-15T04:38:19,252 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:19,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237559252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:19,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:19,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237559354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:19,558 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:19,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237559557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:19,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/5cf74a1384684e4e8f1af1b64b15442a 2024-12-15T04:38:19,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/0983e0f198de41658ef78c6fc4c4ff21 is 50, key is test_row_0/B:col10/1734237499231/Put/seqid=0 2024-12-15T04:38:19,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741891_1067 (size=12301) 2024-12-15T04:38:19,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/0983e0f198de41658ef78c6fc4c4ff21 2024-12-15T04:38:19,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/e9a0a99e729644cead3ea33cb0bf88d6 is 50, key is test_row_0/C:col10/1734237499231/Put/seqid=0 2024-12-15T04:38:19,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741892_1068 (size=12301) 2024-12-15T04:38:19,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:19,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237559820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:19,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:19,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237559825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:19,862 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:19,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237559861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:20,107 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/e9a0a99e729644cead3ea33cb0bf88d6 2024-12-15T04:38:20,115 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/5cf74a1384684e4e8f1af1b64b15442a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5cf74a1384684e4e8f1af1b64b15442a 2024-12-15T04:38:20,121 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5cf74a1384684e4e8f1af1b64b15442a, entries=150, sequenceid=277, filesize=12.0 K 2024-12-15T04:38:20,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/0983e0f198de41658ef78c6fc4c4ff21 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/0983e0f198de41658ef78c6fc4c4ff21 2024-12-15T04:38:20,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/0983e0f198de41658ef78c6fc4c4ff21, entries=150, sequenceid=277, filesize=12.0 K 2024-12-15T04:38:20,134 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/e9a0a99e729644cead3ea33cb0bf88d6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/e9a0a99e729644cead3ea33cb0bf88d6 2024-12-15T04:38:20,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/e9a0a99e729644cead3ea33cb0bf88d6, entries=150, sequenceid=277, filesize=12.0 K 2024-12-15T04:38:20,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6443b0fc7191a86cb86de2a8c7e17f47 in 910ms, sequenceid=277, compaction requested=true 2024-12-15T04:38:20,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:20,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:20,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:20,143 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:20,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:20,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:20,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:20,143 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:20,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:20,145 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51622 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:20,145 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:20,145 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:20,145 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/0fdf702de5b347429651e45bf6146446, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/655f6b0a7c224bc6bc468a228c13589a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/94ff6d0cdfa54b03922c6f972dc2f43f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5cf74a1384684e4e8f1af1b64b15442a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=50.4 K 2024-12-15T04:38:20,146 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:20,146 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:20,146 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:20,146 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e504f21b109c461aaaba8324622c5499, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f8160a3872d7427182507fd059c7ec13, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9f2b0b387a134143bcc26c9c7fd54e86, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/0983e0f198de41658ef78c6fc4c4ff21] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=48.1 K 2024-12-15T04:38:20,146 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0fdf702de5b347429651e45bf6146446, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734237495096 2024-12-15T04:38:20,147 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e504f21b109c461aaaba8324622c5499, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734237495096 2024-12-15T04:38:20,147 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 655f6b0a7c224bc6bc468a228c13589a, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=238, 
earliestPutTs=1734237495892 2024-12-15T04:38:20,147 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting f8160a3872d7427182507fd059c7ec13, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734237495892 2024-12-15T04:38:20,147 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94ff6d0cdfa54b03922c6f972dc2f43f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237496527 2024-12-15T04:38:20,147 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f2b0b387a134143bcc26c9c7fd54e86, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237496527 2024-12-15T04:38:20,148 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cf74a1384684e4e8f1af1b64b15442a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1734237497695 2024-12-15T04:38:20,148 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 0983e0f198de41658ef78c6fc4c4ff21, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1734237497695 2024-12-15T04:38:20,169 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#55 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:20,169 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/41b9606eeec04c6196b7c0026df92063 is 50, key is test_row_0/A:col10/1734237499231/Put/seqid=0 2024-12-15T04:38:20,171 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#54 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:20,171 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/601cb9debaca474aa5bc89a57fad81f1 is 50, key is test_row_0/B:col10/1734237499231/Put/seqid=0 2024-12-15T04:38:20,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741893_1069 (size=12915) 2024-12-15T04:38:20,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741894_1070 (size=12915) 2024-12-15T04:38:20,208 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/601cb9debaca474aa5bc89a57fad81f1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/601cb9debaca474aa5bc89a57fad81f1 2024-12-15T04:38:20,221 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into 601cb9debaca474aa5bc89a57fad81f1(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:20,221 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:20,221 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=12, startTime=1734237500143; duration=0sec 2024-12-15T04:38:20,221 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:20,221 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:20,221 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:20,223 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:20,223 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:20,223 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
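(Annotation) The selection entries above report that the exploring compaction policy picked four store files per store, totalling roughly 48-50 K, "after considering 3 permutations with 3 in ratio". As a rough, hedged illustration of that size-ratio rule only (not the actual ExploringCompactionPolicy source; the 1.2 ratio is the usual default and the byte sizes are approximations of the files listed above), a candidate set qualifies when no single file is larger than the ratio times the combined size of the other files in the set:

    import java.util.List;

    // Simplified sketch of the "in ratio" test used while exploring candidate
    // compaction sets. The real policy also weighs file counts and total-size
    // limits; only the size-ratio check is shown here.
    public final class RatioCheckSketch {

        // hbase.hstore.compaction.ratio defaults to 1.2 for minor compactions.
        private static final double COMPACTION_RATIO = 1.2;

        // Each file must be <= ratio * (sum of the other files' sizes).
        static boolean filesInRatio(List<Long> fileSizes) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > COMPACTION_RATIO * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Sizes comparable to the four ~12 K store files selected above (bytes).
            List<Long> candidate = List.of(12_595L, 12_184L, 12_184L, 12_288L);
            System.out.println("in ratio: " + filesInRatio(candidate));
        }
    }
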
2024-12-15T04:38:20,223 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/b88127cd3e194d39bc32567f2e436a2b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/fd65579037744fd5b26da1a50f75e059, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9406626a5b624ed18decaecaaae66562, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/e9a0a99e729644cead3ea33cb0bf88d6] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=48.1 K 2024-12-15T04:38:20,224 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting b88127cd3e194d39bc32567f2e436a2b, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=212, earliestPutTs=1734237495096 2024-12-15T04:38:20,224 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fd65579037744fd5b26da1a50f75e059, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734237495892 2024-12-15T04:38:20,225 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9406626a5b624ed18decaecaaae66562, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237496527 2024-12-15T04:38:20,226 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e9a0a99e729644cead3ea33cb0bf88d6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1734237497695 2024-12-15T04:38:20,236 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#56 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:20,237 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/a7a40bc87ad04d7d979f789beabc91c6 is 50, key is test_row_0/C:col10/1734237499231/Put/seqid=0 2024-12-15T04:38:20,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741895_1071 (size=12915) 2024-12-15T04:38:20,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:20,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:20,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:20,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:20,369 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:20,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:20,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:20,370 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:20,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/fc9dfd84d574492b9bdf27861b2e81ca is 50, key is test_row_0/A:col10/1734237500368/Put/seqid=0 2024-12-15T04:38:20,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741896_1072 (size=14741) 2024-12-15T04:38:20,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:20,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237560428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:20,532 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:20,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237560531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:20,587 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/41b9606eeec04c6196b7c0026df92063 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/41b9606eeec04c6196b7c0026df92063 2024-12-15T04:38:20,597 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into 41b9606eeec04c6196b7c0026df92063(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:20,598 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:20,598 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=12, startTime=1734237500143; duration=0sec 2024-12-15T04:38:20,598 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:20,598 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:20,659 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/a7a40bc87ad04d7d979f789beabc91c6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a7a40bc87ad04d7d979f789beabc91c6 2024-12-15T04:38:20,669 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into a7a40bc87ad04d7d979f789beabc91c6(size=12.6 K), total size for store is 12.6 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:20,669 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:20,669 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=12, startTime=1734237500143; duration=0sec 2024-12-15T04:38:20,669 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:20,669 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:20,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:20,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237560734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:20,779 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/fc9dfd84d574492b9bdf27861b2e81ca 2024-12-15T04:38:20,790 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/d80f4abc4fcc4e5cad6a062c39206286 is 50, key is test_row_0/B:col10/1734237500368/Put/seqid=0 2024-12-15T04:38:20,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741897_1073 (size=12301) 2024-12-15T04:38:20,797 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/d80f4abc4fcc4e5cad6a062c39206286 2024-12-15T04:38:20,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f52791f234064173b42f5dd426d1ca03 is 50, key is test_row_0/C:col10/1734237500368/Put/seqid=0 2024-12-15T04:38:20,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741898_1074 (size=12301) 2024-12-15T04:38:20,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f52791f234064173b42f5dd426d1ca03 2024-12-15T04:38:20,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/fc9dfd84d574492b9bdf27861b2e81ca as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fc9dfd84d574492b9bdf27861b2e81ca 2024-12-15T04:38:20,833 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fc9dfd84d574492b9bdf27861b2e81ca, entries=200, sequenceid=289, filesize=14.4 K 2024-12-15T04:38:20,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/d80f4abc4fcc4e5cad6a062c39206286 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d80f4abc4fcc4e5cad6a062c39206286 2024-12-15T04:38:20,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d80f4abc4fcc4e5cad6a062c39206286, entries=150, sequenceid=289, filesize=12.0 K 2024-12-15T04:38:20,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f52791f234064173b42f5dd426d1ca03 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f52791f234064173b42f5dd426d1ca03 2024-12-15T04:38:20,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f52791f234064173b42f5dd426d1ca03, entries=150, sequenceid=289, filesize=12.0 K 2024-12-15T04:38:20,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6443b0fc7191a86cb86de2a8c7e17f47 in 481ms, sequenceid=289, compaction requested=false 2024-12-15T04:38:20,851 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:20,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-12-15T04:38:20,956 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-12-15T04:38:20,957 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:20,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-12-15T04:38:20,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-15T04:38:20,959 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): 
pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:20,959 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:20,960 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:21,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:21,041 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-15T04:38:21,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:21,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:21,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:21,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:21,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:21,041 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:21,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4a41a1994d9c4e6b99b7344fdc7d6c27 is 50, key is test_row_0/A:col10/1734237501039/Put/seqid=0 2024-12-15T04:38:21,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741899_1075 (size=12301) 2024-12-15T04:38:21,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-15T04:38:21,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:21,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237561059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,114 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:21,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:21,115 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,115 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
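(Annotation) The WARN/DEBUG pairs above show writers being rejected with RegionTooBusyException while the region's memstore sits over its 512 K blocking limit and flushes/compactions catch up; the test client simply backs off and retries. A minimal, hedged sketch of that pattern against the public client API (table name, family, qualifier and retry counts are assumptions for illustration; the HBase client also retries such exceptions internally, this just makes the backoff explicit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative writer that backs off when a region reports memstore pressure.
    public class BackoffWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; attempt <= 10; attempt++) {
                    try {
                        table.put(put);          // may be rejected while the memstore is over its limit
                        break;
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs); // wait for the in-flight flush/compaction to drain
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }
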
2024-12-15T04:38:21,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:21,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237561162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-15T04:38:21,271 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:21,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:21,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:21,272 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:21,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237561365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,424 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:21,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:21,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,425 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
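(Annotation) The "Over memstore limit=512.0 K" figure in the rejections above is the per-region blocking size: the configured memstore flush size multiplied by the block multiplier, deliberately tiny in this test. A hedged sketch of the two properties involved (the 128 KB flush size is an assumption chosen so that 128 KB x 4 = 512 KB matches the log; production defaults are 128 MB and 4):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch of the two settings behind the RegionTooBusyException threshold.
    public class MemstoreLimitConfig {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes (test-sized here).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block new writes once the memstore grows past flush.size * multiplier.
            conf.setLong("hbase.hregion.memstore.block.multiplier", 4);

            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("writes block above ~" + blockingLimit / 1024 + " K"); // 512 K
        }
    }
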
2024-12-15T04:38:21,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4a41a1994d9c4e6b99b7344fdc7d6c27 2024-12-15T04:38:21,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/d62df976dead403ba19d0c1b34c2ac22 is 50, key is test_row_0/B:col10/1734237501039/Put/seqid=0 2024-12-15T04:38:21,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741900_1076 (size=12301) 2024-12-15T04:38:21,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-15T04:38:21,580 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:21,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,580 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:21,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:21,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237561668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,733 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:21,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:21,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
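(Annotation) The pid=18/19 entries above show a client-requested table flush being dispatched to the region server and refused with "NOT flushing ... as already flushing"; the master keeps re-dispatching the FlushRegionProcedure until the in-progress flush finishes. On the client side the whole exchange sits behind a single admin call, as in this minimal hedged sketch (table name as in the log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    // Requests a table flush; the master runs a FlushTableProcedure and retries
    // any region whose flush cannot start because one is already in progress.
    public class FlushTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Waits on the flush procedure, as the HBaseAdmin$TableFuture entries above indicate.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
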
2024-12-15T04:38:21,734 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:21,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237561823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,824 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4126 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:21,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:21,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237561835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,837 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4136 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:21,886 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:21,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:21,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:21,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:21,887 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:21,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:21,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:21,928 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/d62df976dead403ba19d0c1b34c2ac22 2024-12-15T04:38:21,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c1c349c710584588aac03e60608ac03b is 50, key is test_row_0/C:col10/1734237501039/Put/seqid=0 2024-12-15T04:38:21,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741901_1077 (size=12301) 2024-12-15T04:38:22,039 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:22,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:22,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:22,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:22,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:22,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:22,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:22,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:22,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-15T04:38:22,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:22,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237562172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:22,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:22,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:22,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:22,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:22,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:22,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:22,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:22,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:22,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:22,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46242 deadline: 1734237562310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:22,312 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8183 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:22,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:22,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46250 deadline: 1734237562324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:22,326 DEBUG [Thread-151 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8200 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:22,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=317 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c1c349c710584588aac03e60608ac03b 2024-12-15T04:38:22,345 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:22,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:22,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:22,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:22,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:22,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:22,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:22,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
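The repeated RegionTooBusyException above is the region blocking writes once its memstore crosses the blocking limit ("Over memstore limit=512.0 K"), while the client's RpcRetryingCallerImpl keeps retrying the put (tries=7 of retries=16). A minimal configuration sketch of the knobs involved, with illustrative values rather than this test's actual settings (the test presumably keeps the flush size tiny to force the condition):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBackpressureSettings {
      public static Configuration tuned() {
        Configuration conf = HBaseConfiguration.create();
        // Per-region blocking limit is roughly flush size x block multiplier;
        // the values below are illustrative, not the test's configuration.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // 128 MB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        // Client-side retry behaviour surfaced by RpcRetryingCallerImpl above.
        conf.setInt("hbase.client.retries.number", 16);
        conf.setLong("hbase.client.pause", 100); // base retry backoff in ms
        return conf;
      }
    }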
2024-12-15T04:38:22,351 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4a41a1994d9c4e6b99b7344fdc7d6c27 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4a41a1994d9c4e6b99b7344fdc7d6c27 2024-12-15T04:38:22,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4a41a1994d9c4e6b99b7344fdc7d6c27, entries=150, sequenceid=317, filesize=12.0 K 2024-12-15T04:38:22,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/d62df976dead403ba19d0c1b34c2ac22 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d62df976dead403ba19d0c1b34c2ac22 2024-12-15T04:38:22,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d62df976dead403ba19d0c1b34c2ac22, entries=150, sequenceid=317, filesize=12.0 K 2024-12-15T04:38:22,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c1c349c710584588aac03e60608ac03b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c1c349c710584588aac03e60608ac03b 2024-12-15T04:38:22,373 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c1c349c710584588aac03e60608ac03b, entries=150, sequenceid=317, filesize=12.0 K 2024-12-15T04:38:22,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1333ms, sequenceid=317, compaction requested=true 2024-12-15T04:38:22,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:22,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:22,375 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:22,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:22,375 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:22,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:22,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:22,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:22,375 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:22,376 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:22,376 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39957 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:22,376 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:22,376 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:22,376 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:22,376 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:22,376 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/601cb9debaca474aa5bc89a57fad81f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d80f4abc4fcc4e5cad6a062c39206286, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d62df976dead403ba19d0c1b34c2ac22] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.6 K 2024-12-15T04:38:22,376 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/41b9606eeec04c6196b7c0026df92063, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fc9dfd84d574492b9bdf27861b2e81ca, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4a41a1994d9c4e6b99b7344fdc7d6c27] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=39.0 K 2024-12-15T04:38:22,377 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 41b9606eeec04c6196b7c0026df92063, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1734237497695 2024-12-15T04:38:22,377 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 601cb9debaca474aa5bc89a57fad81f1, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1734237497695 2024-12-15T04:38:22,377 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc9dfd84d574492b9bdf27861b2e81ca, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734237499236 2024-12-15T04:38:22,377 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d80f4abc4fcc4e5cad6a062c39206286, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734237499245 2024-12-15T04:38:22,378 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a41a1994d9c4e6b99b7344fdc7d6c27, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734237500412 2024-12-15T04:38:22,378 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d62df976dead403ba19d0c1b34c2ac22, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734237500412 2024-12-15T04:38:22,386 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#63 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:22,389 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#64 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:22,389 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/63a4b7833de74f449632769c68ab9edf is 50, key is test_row_0/B:col10/1734237501039/Put/seqid=0 2024-12-15T04:38:22,390 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/93bd2e6a761d4d9d8feceeb40611806c is 50, key is test_row_0/A:col10/1734237501039/Put/seqid=0 2024-12-15T04:38:22,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741903_1079 (size=13017) 2024-12-15T04:38:22,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741902_1078 (size=13017) 2024-12-15T04:38:22,423 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/93bd2e6a761d4d9d8feceeb40611806c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/93bd2e6a761d4d9d8feceeb40611806c 2024-12-15T04:38:22,433 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into 93bd2e6a761d4d9d8feceeb40611806c(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
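The flusher committed a third store file per family and immediately queued minor compactions; ExploringCompactionPolicy selected all three eligible files because the minimum file count for compaction (hbase.hstore.compaction.min, default 3) was reached. A small sketch of requesting the same kind of compaction by hand through the standard Admin API (table name taken from the log; everything else illustrative):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionNudge {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Threshold that made three flushed files eligible for a minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask for a (minor) compaction of the table, analogous to the
          // system-requested compaction the flusher queued above.
          admin.compact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }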
2024-12-15T04:38:22,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:22,434 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=13, startTime=1734237502375; duration=0sec 2024-12-15T04:38:22,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:22,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:22,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:22,436 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37517 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:22,436 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:22,436 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:22,436 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a7a40bc87ad04d7d979f789beabc91c6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f52791f234064173b42f5dd426d1ca03, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c1c349c710584588aac03e60608ac03b] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.6 K 2024-12-15T04:38:22,437 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7a40bc87ad04d7d979f789beabc91c6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1734237497695 2024-12-15T04:38:22,437 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f52791f234064173b42f5dd426d1ca03, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1734237499245 2024-12-15T04:38:22,437 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1c349c710584588aac03e60608ac03b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734237500412 2024-12-15T04:38:22,461 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#65 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:22,461 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/cae6591611b148ebaef0d94a391fac9d is 50, key is test_row_0/C:col10/1734237501039/Put/seqid=0 2024-12-15T04:38:22,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741904_1080 (size=13017) 2024-12-15T04:38:22,494 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/cae6591611b148ebaef0d94a391fac9d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/cae6591611b148ebaef0d94a391fac9d 2024-12-15T04:38:22,498 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:22,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-12-15T04:38:22,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:22,499 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-15T04:38:22,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:22,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:22,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:22,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:22,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:22,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:22,503 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into cae6591611b148ebaef0d94a391fac9d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:22,503 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:22,503 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=13, startTime=1734237502375; duration=0sec 2024-12-15T04:38:22,503 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:22,503 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:22,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/b98e820c1c1d41548f30b3d95236fb13 is 50, key is test_row_0/A:col10/1734237501058/Put/seqid=0 2024-12-15T04:38:22,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741905_1081 (size=12301) 2024-12-15T04:38:22,518 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/b98e820c1c1d41548f30b3d95236fb13 2024-12-15T04:38:22,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/fbf9b68fad664b5cb4a11ad9b2890304 is 50, key is test_row_0/B:col10/1734237501058/Put/seqid=0 2024-12-15T04:38:22,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741906_1082 (size=12301) 2024-12-15T04:38:22,533 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/fbf9b68fad664b5cb4a11ad9b2890304 2024-12-15T04:38:22,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/91df86347d4f42e6bcd22bdecefe6428 is 50, key is test_row_0/C:col10/1734237501058/Put/seqid=0 2024-12-15T04:38:22,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741907_1083 (size=12301) 
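The flush in progress here writes one ~12 K hfile per column family (A, B, C), each with 150 entries at sequenceid=328, which is the shape of write the AcidGuaranteesTestTool writer produces. A minimal sketch of that kind of multi-family put, assuming the standard client API; the row, family and qualifier names mirror the keys visible in the hfile writer lines above, and the value is purely illustrative:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AcidWriterSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same value written to every family in one Put, so a concurrent
          // reader should never observe a partially applied row.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = Bytes.toBytes("v1"); // illustrative payload
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
          }
          table.put(put);
        }
      }
    }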
2024-12-15T04:38:22,805 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/63a4b7833de74f449632769c68ab9edf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/63a4b7833de74f449632769c68ab9edf 2024-12-15T04:38:22,812 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into 63a4b7833de74f449632769c68ab9edf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:22,812 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:22,812 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=13, startTime=1734237502375; duration=0sec 2024-12-15T04:38:22,812 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:22,812 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:22,947 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/91df86347d4f42e6bcd22bdecefe6428 2024-12-15T04:38:22,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/b98e820c1c1d41548f30b3d95236fb13 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b98e820c1c1d41548f30b3d95236fb13 2024-12-15T04:38:22,959 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b98e820c1c1d41548f30b3d95236fb13, entries=150, sequenceid=328, filesize=12.0 K 2024-12-15T04:38:22,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/fbf9b68fad664b5cb4a11ad9b2890304 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/fbf9b68fad664b5cb4a11ad9b2890304 2024-12-15T04:38:22,965 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/fbf9b68fad664b5cb4a11ad9b2890304, entries=150, sequenceid=328, filesize=12.0 K 2024-12-15T04:38:22,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/91df86347d4f42e6bcd22bdecefe6428 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/91df86347d4f42e6bcd22bdecefe6428 2024-12-15T04:38:22,971 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/91df86347d4f42e6bcd22bdecefe6428, entries=150, sequenceid=328, filesize=12.0 K 2024-12-15T04:38:22,973 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=0 B/0 for 6443b0fc7191a86cb86de2a8c7e17f47 in 474ms, sequenceid=328, compaction requested=false 2024-12-15T04:38:22,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:22,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
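The FlushRegionProcedure (pid=19) finishes right after this, and the test client immediately requests another flush of TestAcidGuarantees (pid=20/21 below). A minimal sketch of that request path, assuming the standard Admin API: the synchronous flush call is what shows up on the master as a FlushTableProcedure with one FlushRegionProcedure subprocedure per region.

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws IOException {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Synchronous flush of all regions of the table; the master runs it
          // as a FlushTableProcedure, as in the pid=18/20 entries nearby.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }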
2024-12-15T04:38:22,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-15T04:38:22,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-15T04:38:22,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-12-15T04:38:22,976 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0140 sec 2024-12-15T04:38:22,978 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 2.0200 sec 2024-12-15T04:38:23,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-12-15T04:38:23,063 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-12-15T04:38:23,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:23,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-12-15T04:38:23,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T04:38:23,066 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:23,067 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:23,067 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:23,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T04:38:23,190 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:23,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:23,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:23,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:23,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, 
store=B 2024-12-15T04:38:23,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:23,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:23,191 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:23,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4b49f35e4fe048f19856fd13b6580941 is 50, key is test_row_0/A:col10/1734237503189/Put/seqid=0 2024-12-15T04:38:23,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741908_1084 (size=14741) 2024-12-15T04:38:23,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4b49f35e4fe048f19856fd13b6580941 2024-12-15T04:38:23,220 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,220 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-15T04:38:23,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:23,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,220 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/2fc2489c014545d7adf0a0db71e6147d is 50, key is test_row_0/B:col10/1734237503189/Put/seqid=0 2024-12-15T04:38:23,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741909_1085 (size=12301) 2024-12-15T04:38:23,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:23,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237563251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,355 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:23,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237563354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T04:38:23,373 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,373 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-15T04:38:23,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:23,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,526 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,526 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-15T04:38:23,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:23,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,526 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,557 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237563557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,632 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/2fc2489c014545d7adf0a0db71e6147d 2024-12-15T04:38:23,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/48553b3f609c4321b8f63f5a91ef37e0 is 50, key is test_row_0/C:col10/1734237503189/Put/seqid=0 2024-12-15T04:38:23,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741910_1086 (size=12301) 2024-12-15T04:38:23,656 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=341 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/48553b3f609c4321b8f63f5a91ef37e0 2024-12-15T04:38:23,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4b49f35e4fe048f19856fd13b6580941 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4b49f35e4fe048f19856fd13b6580941 2024-12-15T04:38:23,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T04:38:23,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4b49f35e4fe048f19856fd13b6580941, entries=200, sequenceid=341, filesize=14.4 K 2024-12-15T04:38:23,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/2fc2489c014545d7adf0a0db71e6147d as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2fc2489c014545d7adf0a0db71e6147d 2024-12-15T04:38:23,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2fc2489c014545d7adf0a0db71e6147d, entries=150, sequenceid=341, filesize=12.0 K 2024-12-15T04:38:23,679 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/48553b3f609c4321b8f63f5a91ef37e0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/48553b3f609c4321b8f63f5a91ef37e0 2024-12-15T04:38:23,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-15T04:38:23,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:23,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:23,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:23,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/48553b3f609c4321b8f63f5a91ef37e0, entries=150, sequenceid=341, filesize=12.0 K 2024-12-15T04:38:23,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 6443b0fc7191a86cb86de2a8c7e17f47 in 500ms, sequenceid=341, compaction requested=true 2024-12-15T04:38:23,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:23,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:23,691 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:23,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:23,691 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:23,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:23,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:23,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:23,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:23,692 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40059 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:23,692 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:23,692 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:23,692 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 
2024-12-15T04:38:23,692 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,693 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,693 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/63a4b7833de74f449632769c68ab9edf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/fbf9b68fad664b5cb4a11ad9b2890304, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2fc2489c014545d7adf0a0db71e6147d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.7 K 2024-12-15T04:38:23,693 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/93bd2e6a761d4d9d8feceeb40611806c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b98e820c1c1d41548f30b3d95236fb13, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4b49f35e4fe048f19856fd13b6580941] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=39.1 K 2024-12-15T04:38:23,693 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 63a4b7833de74f449632769c68ab9edf, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734237500412 2024-12-15T04:38:23,693 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93bd2e6a761d4d9d8feceeb40611806c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734237500412 2024-12-15T04:38:23,694 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fbf9b68fad664b5cb4a11ad9b2890304, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734237501052 2024-12-15T04:38:23,695 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b98e820c1c1d41548f30b3d95236fb13, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734237501052 2024-12-15T04:38:23,695 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fc2489c014545d7adf0a0db71e6147d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734237503183 2024-12-15T04:38:23,695 DEBUG 
[RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b49f35e4fe048f19856fd13b6580941, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734237503178 2024-12-15T04:38:23,709 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#72 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:23,709 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/9e2ad297aee94eccade8b812ef371435 is 50, key is test_row_0/B:col10/1734237503189/Put/seqid=0 2024-12-15T04:38:23,712 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#73 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:23,712 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/e85dd22b8eb4494eac3eb78e36c48b47 is 50, key is test_row_0/A:col10/1734237503189/Put/seqid=0 2024-12-15T04:38:23,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741911_1087 (size=13119) 2024-12-15T04:38:23,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741912_1088 (size=13119) 2024-12-15T04:38:23,731 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/e85dd22b8eb4494eac3eb78e36c48b47 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e85dd22b8eb4494eac3eb78e36c48b47 2024-12-15T04:38:23,737 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into e85dd22b8eb4494eac3eb78e36c48b47(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:23,737 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:23,737 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=13, startTime=1734237503691; duration=0sec 2024-12-15T04:38:23,737 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:23,737 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:23,738 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:23,739 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:23,739 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:23,739 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:23,739 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/cae6591611b148ebaef0d94a391fac9d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/91df86347d4f42e6bcd22bdecefe6428, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/48553b3f609c4321b8f63f5a91ef37e0] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.7 K 2024-12-15T04:38:23,740 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting cae6591611b148ebaef0d94a391fac9d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1734237500412 2024-12-15T04:38:23,740 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91df86347d4f42e6bcd22bdecefe6428, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1734237501052 2024-12-15T04:38:23,741 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48553b3f609c4321b8f63f5a91ef37e0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734237503183 2024-12-15T04:38:23,752 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#74 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:23,752 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9c1d52649b924f78a9adf2a14e0bfc46 is 50, key is test_row_0/C:col10/1734237503189/Put/seqid=0 2024-12-15T04:38:23,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741913_1089 (size=13119) 2024-12-15T04:38:23,776 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9c1d52649b924f78a9adf2a14e0bfc46 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9c1d52649b924f78a9adf2a14e0bfc46 2024-12-15T04:38:23,782 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into 9c1d52649b924f78a9adf2a14e0bfc46(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:23,783 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:23,783 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=13, startTime=1734237503692; duration=0sec 2024-12-15T04:38:23,783 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:23,783 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:23,833 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,834 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-12-15T04:38:23,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:23,834 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:38:23,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:23,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:23,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/8b4f7e41c27c4f96bd42c4189a551516 is 50, key is test_row_0/A:col10/1734237503247/Put/seqid=0 2024-12-15T04:38:23,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741914_1090 (size=12301) 2024-12-15T04:38:23,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:23,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:23,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:23,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237563878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:23,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:23,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237563981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:24,121 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/9e2ad297aee94eccade8b812ef371435 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9e2ad297aee94eccade8b812ef371435 2024-12-15T04:38:24,127 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into 9e2ad297aee94eccade8b812ef371435(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:24,127 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:24,127 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=13, startTime=1734237503691; duration=0sec 2024-12-15T04:38:24,127 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:24,128 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:24,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T04:38:24,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:24,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237564184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:24,244 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/8b4f7e41c27c4f96bd42c4189a551516 2024-12-15T04:38:24,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/eaf56634106f4367ab72f1d3b17f09b4 is 50, key is test_row_0/B:col10/1734237503247/Put/seqid=0 2024-12-15T04:38:24,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741915_1091 (size=12301) 2024-12-15T04:38:24,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:24,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237564488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:24,658 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/eaf56634106f4367ab72f1d3b17f09b4 2024-12-15T04:38:24,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/7367e35a70b64a46a462b285653c2fe7 is 50, key is test_row_0/C:col10/1734237503247/Put/seqid=0 2024-12-15T04:38:24,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741916_1092 (size=12301) 2024-12-15T04:38:24,677 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/7367e35a70b64a46a462b285653c2fe7 2024-12-15T04:38:24,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/8b4f7e41c27c4f96bd42c4189a551516 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/8b4f7e41c27c4f96bd42c4189a551516 2024-12-15T04:38:24,691 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/8b4f7e41c27c4f96bd42c4189a551516, entries=150, sequenceid=368, filesize=12.0 K 2024-12-15T04:38:24,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/eaf56634106f4367ab72f1d3b17f09b4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/eaf56634106f4367ab72f1d3b17f09b4 2024-12-15T04:38:24,699 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/eaf56634106f4367ab72f1d3b17f09b4, entries=150, sequenceid=368, filesize=12.0 K 2024-12-15T04:38:24,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/7367e35a70b64a46a462b285653c2fe7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7367e35a70b64a46a462b285653c2fe7 2024-12-15T04:38:24,709 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7367e35a70b64a46a462b285653c2fe7, entries=150, sequenceid=368, filesize=12.0 K 2024-12-15T04:38:24,712 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 6443b0fc7191a86cb86de2a8c7e17f47 in 878ms, sequenceid=368, compaction requested=false 2024-12-15T04:38:24,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:24,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:24,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-15T04:38:24,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-15T04:38:24,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-12-15T04:38:24,718 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6470 sec 2024-12-15T04:38:24,722 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.6550 sec 2024-12-15T04:38:24,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:24,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:38:24,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:24,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:24,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:24,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:24,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:24,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:24,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4aacd127f6d147acbfeeca93b8813560 is 50, key is test_row_0/A:col10/1734237503871/Put/seqid=0 2024-12-15T04:38:25,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741917_1093 (size=12301) 2024-12-15T04:38:25,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237565040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:25,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237565143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-15T04:38:25,170 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-12-15T04:38:25,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:25,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-12-15T04:38:25,172 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:25,172 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:25,172 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:25,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-15T04:38:25,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-15T04:38:25,324 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,325 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:25,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:25,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:25,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,325 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:25,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237565346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4aacd127f6d147acbfeeca93b8813560 2024-12-15T04:38:25,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e45c2e705f544371893ec59ebb2b5e44 is 50, key is test_row_0/B:col10/1734237503871/Put/seqid=0 2024-12-15T04:38:25,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741918_1094 (size=12301) 2024-12-15T04:38:25,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-15T04:38:25,477 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,478 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:25,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:25,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:25,478 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,630 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:25,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:25,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,631 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:25,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237565648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-15T04:38:25,783 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,783 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:25,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:25,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,784 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:25,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e45c2e705f544371893ec59ebb2b5e44 2024-12-15T04:38:25,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6e02323714824a1e866df6155eb3e44c is 50, key is test_row_0/C:col10/1734237503871/Put/seqid=0 2024-12-15T04:38:25,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741919_1095 (size=12301) 2024-12-15T04:38:25,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46200 deadline: 1734237565842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,844 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8146 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:25,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46246 deadline: 1734237565843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,845 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8145 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:25,936 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:25,936 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:25,937 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:25,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:25,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:26,089 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:26,089 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:26,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:26,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
as already flushing 2024-12-15T04:38:26,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:26,090 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:26,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:26,155 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237566155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:26,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6e02323714824a1e866df6155eb3e44c 2024-12-15T04:38:26,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4aacd127f6d147acbfeeca93b8813560 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4aacd127f6d147acbfeeca93b8813560 2024-12-15T04:38:26,242 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:26,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:26,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:26,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:26,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:26,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:26,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:26,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:26,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4aacd127f6d147acbfeeca93b8813560, entries=150, sequenceid=381, filesize=12.0 K 2024-12-15T04:38:26,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/e45c2e705f544371893ec59ebb2b5e44 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e45c2e705f544371893ec59ebb2b5e44 2024-12-15T04:38:26,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e45c2e705f544371893ec59ebb2b5e44, entries=150, sequenceid=381, filesize=12.0 K 2024-12-15T04:38:26,253 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6e02323714824a1e866df6155eb3e44c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6e02323714824a1e866df6155eb3e44c 2024-12-15T04:38:26,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6e02323714824a1e866df6155eb3e44c, entries=150, sequenceid=381, filesize=12.0 K 
2024-12-15T04:38:26,260 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1267ms, sequenceid=381, compaction requested=true 2024-12-15T04:38:26,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:26,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:26,260 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:26,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:26,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:26,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:26,260 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:26,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:26,260 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:26,261 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:26,261 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:26,261 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:26,261 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:26,261 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:26,261 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:26,261 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e85dd22b8eb4494eac3eb78e36c48b47, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/8b4f7e41c27c4f96bd42c4189a551516, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4aacd127f6d147acbfeeca93b8813560] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.8 K 2024-12-15T04:38:26,261 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9e2ad297aee94eccade8b812ef371435, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/eaf56634106f4367ab72f1d3b17f09b4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e45c2e705f544371893ec59ebb2b5e44] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.8 K 2024-12-15T04:38:26,262 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting e85dd22b8eb4494eac3eb78e36c48b47, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734237503183 2024-12-15T04:38:26,262 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e2ad297aee94eccade8b812ef371435, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734237503183 2024-12-15T04:38:26,262 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting eaf56634106f4367ab72f1d3b17f09b4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1734237503244 2024-12-15T04:38:26,262 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b4f7e41c27c4f96bd42c4189a551516, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1734237503244 2024-12-15T04:38:26,263 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e45c2e705f544371893ec59ebb2b5e44, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734237503871 2024-12-15T04:38:26,263 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4aacd127f6d147acbfeeca93b8813560, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734237503871 2024-12-15T04:38:26,270 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#81 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:26,270 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#82 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:26,271 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9cbec089649741f38508a30a1b3febb5 is 50, key is test_row_0/A:col10/1734237503871/Put/seqid=0 2024-12-15T04:38:26,271 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a1599cc2e4324873801f16dc30ce0e18 is 50, key is test_row_0/B:col10/1734237503871/Put/seqid=0 2024-12-15T04:38:26,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-15T04:38:26,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741921_1097 (size=13221) 2024-12-15T04:38:26,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741920_1096 (size=13221) 2024-12-15T04:38:26,394 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:26,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-12-15T04:38:26,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:26,395 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:38:26,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:26,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:26,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:26,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:26,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:26,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:26,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/95e5867c533e44c69da4918b1ff078ac is 50, key is test_row_0/A:col10/1734237505032/Put/seqid=0 2024-12-15T04:38:26,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741922_1098 (size=12301) 2024-12-15T04:38:26,686 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/a1599cc2e4324873801f16dc30ce0e18 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a1599cc2e4324873801f16dc30ce0e18 2024-12-15T04:38:26,687 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9cbec089649741f38508a30a1b3febb5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9cbec089649741f38508a30a1b3febb5 2024-12-15T04:38:26,705 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into a1599cc2e4324873801f16dc30ce0e18(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:26,705 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:26,705 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=13, startTime=1734237506260; duration=0sec 2024-12-15T04:38:26,706 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:26,706 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:26,706 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:26,706 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into 9cbec089649741f38508a30a1b3febb5(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:26,706 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:26,706 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=13, startTime=1734237506260; duration=0sec 2024-12-15T04:38:26,706 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:26,706 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:26,707 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:26,707 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:26,708 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:26,708 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9c1d52649b924f78a9adf2a14e0bfc46, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7367e35a70b64a46a462b285653c2fe7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6e02323714824a1e866df6155eb3e44c] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.8 K 2024-12-15T04:38:26,708 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c1d52649b924f78a9adf2a14e0bfc46, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=341, earliestPutTs=1734237503183 2024-12-15T04:38:26,708 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7367e35a70b64a46a462b285653c2fe7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1734237503244 2024-12-15T04:38:26,709 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e02323714824a1e866df6155eb3e44c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734237503871 2024-12-15T04:38:26,720 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:26,720 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9674f49735494b8f810c5bc615595cbf is 50, key is test_row_0/C:col10/1734237503871/Put/seqid=0 2024-12-15T04:38:26,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741923_1099 (size=13221) 2024-12-15T04:38:26,813 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/95e5867c533e44c69da4918b1ff078ac 2024-12-15T04:38:26,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f10ab9e0907e426fafa5f1b7da78ec7c is 50, key is test_row_0/B:col10/1734237505032/Put/seqid=0 2024-12-15T04:38:26,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741924_1100 (size=12301) 2024-12-15T04:38:27,136 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/9674f49735494b8f810c5bc615595cbf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9674f49735494b8f810c5bc615595cbf 2024-12-15T04:38:27,144 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into 9674f49735494b8f810c5bc615595cbf(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:27,144 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:27,144 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=13, startTime=1734237506260; duration=0sec 2024-12-15T04:38:27,144 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:27,144 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:27,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:27,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:27,179 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:27,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237567178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:27,229 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f10ab9e0907e426fafa5f1b7da78ec7c 2024-12-15T04:38:27,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f9394a74a00a47dfbfeb0dc2e8137d23 is 50, key is test_row_0/C:col10/1734237505032/Put/seqid=0 2024-12-15T04:38:27,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741925_1101 (size=12301) 2024-12-15T04:38:27,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-15T04:38:27,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:27,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237567280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:27,484 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:27,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237567483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:27,646 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f9394a74a00a47dfbfeb0dc2e8137d23 2024-12-15T04:38:27,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/95e5867c533e44c69da4918b1ff078ac as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/95e5867c533e44c69da4918b1ff078ac 2024-12-15T04:38:27,657 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/95e5867c533e44c69da4918b1ff078ac, entries=150, sequenceid=405, filesize=12.0 K 2024-12-15T04:38:27,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f10ab9e0907e426fafa5f1b7da78ec7c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f10ab9e0907e426fafa5f1b7da78ec7c 2024-12-15T04:38:27,664 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f10ab9e0907e426fafa5f1b7da78ec7c, entries=150, sequenceid=405, filesize=12.0 K 2024-12-15T04:38:27,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f9394a74a00a47dfbfeb0dc2e8137d23 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f9394a74a00a47dfbfeb0dc2e8137d23 2024-12-15T04:38:27,670 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f9394a74a00a47dfbfeb0dc2e8137d23, entries=150, sequenceid=405, filesize=12.0 K 2024-12-15T04:38:27,671 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1277ms, sequenceid=405, compaction requested=false 2024-12-15T04:38:27,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:27,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:27,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-12-15T04:38:27,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-12-15T04:38:27,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-12-15T04:38:27,674 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5010 sec 2024-12-15T04:38:27,676 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.5040 sec 2024-12-15T04:38:27,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:27,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-15T04:38:27,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:27,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:27,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:27,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:27,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:27,789 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:27,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4845fb9eb1db45719b1c467f838d20df is 50, key is test_row_0/A:col10/1734237507787/Put/seqid=0 2024-12-15T04:38:27,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741926_1102 (size=14741) 2024-12-15T04:38:27,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4845fb9eb1db45719b1c467f838d20df 2024-12-15T04:38:27,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c49dfce6f08c4db197b2843b0c12d8c8 is 50, key is test_row_0/B:col10/1734237507787/Put/seqid=0 2024-12-15T04:38:27,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741927_1103 (size=12301) 2024-12-15T04:38:27,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:27,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237567836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:27,940 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:27,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237567938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:28,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:28,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237568142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:28,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c49dfce6f08c4db197b2843b0c12d8c8 2024-12-15T04:38:28,236 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f3826e834efe47f886afb177a7ff54b2 is 50, key is test_row_0/C:col10/1734237507787/Put/seqid=0 2024-12-15T04:38:28,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741928_1104 (size=12301) 2024-12-15T04:38:28,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:28,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237568445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:28,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=421 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f3826e834efe47f886afb177a7ff54b2 2024-12-15T04:38:28,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/4845fb9eb1db45719b1c467f838d20df as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4845fb9eb1db45719b1c467f838d20df 2024-12-15T04:38:28,651 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4845fb9eb1db45719b1c467f838d20df, entries=200, sequenceid=421, filesize=14.4 K 2024-12-15T04:38:28,652 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/c49dfce6f08c4db197b2843b0c12d8c8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c49dfce6f08c4db197b2843b0c12d8c8 2024-12-15T04:38:28,656 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c49dfce6f08c4db197b2843b0c12d8c8, entries=150, sequenceid=421, filesize=12.0 K 2024-12-15T04:38:28,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f3826e834efe47f886afb177a7ff54b2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f3826e834efe47f886afb177a7ff54b2 2024-12-15T04:38:28,662 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f3826e834efe47f886afb177a7ff54b2, entries=150, sequenceid=421, filesize=12.0 K 2024-12-15T04:38:28,663 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 6443b0fc7191a86cb86de2a8c7e17f47 in 875ms, sequenceid=421, compaction requested=true 2024-12-15T04:38:28,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:28,664 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:28,664 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:28,665 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40263 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:28,665 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:28,665 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:28,665 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:28,665 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:28,665 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:28,665 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9cbec089649741f38508a30a1b3febb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/95e5867c533e44c69da4918b1ff078ac, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4845fb9eb1db45719b1c467f838d20df] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=39.3 K 2024-12-15T04:38:28,665 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a1599cc2e4324873801f16dc30ce0e18, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f10ab9e0907e426fafa5f1b7da78ec7c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c49dfce6f08c4db197b2843b0c12d8c8] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.9 K 2024-12-15T04:38:28,665 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9cbec089649741f38508a30a1b3febb5, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734237503871 2024-12-15T04:38:28,665 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a1599cc2e4324873801f16dc30ce0e18, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734237503871 2024-12-15T04:38:28,666 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95e5867c533e44c69da4918b1ff078ac, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1734237505032 2024-12-15T04:38:28,666 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting f10ab9e0907e426fafa5f1b7da78ec7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1734237505032 2024-12-15T04:38:28,666 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4845fb9eb1db45719b1c467f838d20df, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734237507165 2024-12-15T04:38:28,666 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c49dfce6f08c4db197b2843b0c12d8c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734237507170 
2024-12-15T04:38:28,674 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#90 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:28,675 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/7b0cc8b5534349109ef63f0255e12fe6 is 50, key is test_row_0/B:col10/1734237507787/Put/seqid=0 2024-12-15T04:38:28,676 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#91 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:28,677 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/69d57b134b224bc9be82fa7a4b0d0ebe is 50, key is test_row_0/A:col10/1734237507787/Put/seqid=0 2024-12-15T04:38:28,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741929_1105 (size=13323) 2024-12-15T04:38:28,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741930_1106 (size=13323) 2024-12-15T04:38:28,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:28,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:38:28,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:28,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:28,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:28,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:28,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:28,952 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:28,956 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/1b15fd5e439348819053b48ced9cc7f4 is 50, key is test_row_0/A:col10/1734237507830/Put/seqid=0 2024-12-15T04:38:28,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741931_1107 (size=12301) 2024-12-15T04:38:28,975 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:28,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237568973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:28,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/1b15fd5e439348819053b48ced9cc7f4 2024-12-15T04:38:28,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4e059809a3ba4b278d0ffdf749065ff3 is 50, key is test_row_0/B:col10/1734237507830/Put/seqid=0 2024-12-15T04:38:28,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741932_1108 (size=12301) 2024-12-15T04:38:29,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:29,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237569076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:29,102 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/69d57b134b224bc9be82fa7a4b0d0ebe as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/69d57b134b224bc9be82fa7a4b0d0ebe 2024-12-15T04:38:29,102 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/7b0cc8b5534349109ef63f0255e12fe6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7b0cc8b5534349109ef63f0255e12fe6 2024-12-15T04:38:29,107 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into 7b0cc8b5534349109ef63f0255e12fe6(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:29,107 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into 69d57b134b224bc9be82fa7a4b0d0ebe(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:29,108 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:29,108 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:29,108 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=13, startTime=1734237508663; duration=0sec 2024-12-15T04:38:29,108 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=13, startTime=1734237508664; duration=0sec 2024-12-15T04:38:29,108 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:29,108 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:29,108 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:29,108 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:29,108 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:29,113 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:29,114 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:29,114 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:29,114 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9674f49735494b8f810c5bc615595cbf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f9394a74a00a47dfbfeb0dc2e8137d23, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f3826e834efe47f886afb177a7ff54b2] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=36.9 K 2024-12-15T04:38:29,114 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9674f49735494b8f810c5bc615595cbf, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1734237503871 2024-12-15T04:38:29,115 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9394a74a00a47dfbfeb0dc2e8137d23, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1734237505032 2024-12-15T04:38:29,115 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3826e834efe47f886afb177a7ff54b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734237507170 2024-12-15T04:38:29,122 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:29,123 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c0448110b3c7489fa9254541ae00c5b2 is 50, key is test_row_0/C:col10/1734237507787/Put/seqid=0 2024-12-15T04:38:29,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741933_1109 (size=13323) 2024-12-15T04:38:29,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-12-15T04:38:29,277 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-12-15T04:38:29,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:29,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-12-15T04:38:29,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-15T04:38:29,280 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:29,280 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:29,281 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:29,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:29,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237569281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:29,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-15T04:38:29,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4e059809a3ba4b278d0ffdf749065ff3 2024-12-15T04:38:29,405 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6155440c466e404d8235f3eae27adde0 is 50, key is test_row_0/C:col10/1734237507830/Put/seqid=0 2024-12-15T04:38:29,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741934_1110 (size=12301) 2024-12-15T04:38:29,432 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:29,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-15T04:38:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:29,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,538 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c0448110b3c7489fa9254541ae00c5b2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c0448110b3c7489fa9254541ae00c5b2 2024-12-15T04:38:29,543 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into c0448110b3c7489fa9254541ae00c5b2(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:29,543 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:29,543 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=13, startTime=1734237508664; duration=0sec 2024-12-15T04:38:29,543 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:29,543 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:29,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-15T04:38:29,584 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:29,585 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-15T04:38:29,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:29,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:29,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:29,585 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:29,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237569586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:29,737 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:29,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-15T04:38:29,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:29,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:29,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:29,738 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:29,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6155440c466e404d8235f3eae27adde0 2024-12-15T04:38:29,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/1b15fd5e439348819053b48ced9cc7f4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/1b15fd5e439348819053b48ced9cc7f4 2024-12-15T04:38:29,824 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/1b15fd5e439348819053b48ced9cc7f4, entries=150, sequenceid=445, filesize=12.0 K 2024-12-15T04:38:29,825 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4e059809a3ba4b278d0ffdf749065ff3 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4e059809a3ba4b278d0ffdf749065ff3 2024-12-15T04:38:29,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4e059809a3ba4b278d0ffdf749065ff3, entries=150, 
sequenceid=445, filesize=12.0 K 2024-12-15T04:38:29,831 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/6155440c466e404d8235f3eae27adde0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6155440c466e404d8235f3eae27adde0 2024-12-15T04:38:29,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6155440c466e404d8235f3eae27adde0, entries=150, sequenceid=445, filesize=12.0 K 2024-12-15T04:38:29,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 6443b0fc7191a86cb86de2a8c7e17f47 in 884ms, sequenceid=445, compaction requested=false 2024-12-15T04:38:29,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:29,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-15T04:38:29,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:29,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-12-15T04:38:29,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:29,891 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:38:29,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:29,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:29,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:29,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:29,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:29,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:29,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/5b566afbe1dd4fdea5afafdd786a65ed is 50, key is test_row_0/A:col10/1734237508971/Put/seqid=0 2024-12-15T04:38:29,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741935_1111 (size=12301) 2024-12-15T04:38:30,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:30,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. as already flushing 2024-12-15T04:38:30,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:30,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237570133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:30,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:30,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237570235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:30,300 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/5b566afbe1dd4fdea5afafdd786a65ed 2024-12-15T04:38:30,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/98edb7fa514b4d50980d518002c65c6a is 50, key is test_row_0/B:col10/1734237508971/Put/seqid=0 2024-12-15T04:38:30,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741936_1112 (size=12301) 2024-12-15T04:38:30,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-15T04:38:30,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:30,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 310 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237570438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:30,540 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T04:38:30,594 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c8de680 to 127.0.0.1:55935 2024-12-15T04:38:30,594 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:30,595 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72e97e4b to 127.0.0.1:55935 2024-12-15T04:38:30,595 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:30,596 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:55935 2024-12-15T04:38:30,596 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:30,599 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f6b07e3 to 127.0.0.1:55935 2024-12-15T04:38:30,599 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:30,713 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/98edb7fa514b4d50980d518002c65c6a 2024-12-15T04:38:30,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f537d129df7d4f79be7ba21f5f69e6bb is 50, key is test_row_0/C:col10/1734237508971/Put/seqid=0 2024-12-15T04:38:30,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741937_1113 (size=12301) 2024-12-15T04:38:30,741 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:30,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 312 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46222 deadline: 1734237570741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:31,129 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f537d129df7d4f79be7ba21f5f69e6bb 2024-12-15T04:38:31,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/5b566afbe1dd4fdea5afafdd786a65ed as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5b566afbe1dd4fdea5afafdd786a65ed 2024-12-15T04:38:31,146 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5b566afbe1dd4fdea5afafdd786a65ed, entries=150, sequenceid=460, filesize=12.0 K 2024-12-15T04:38:31,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/98edb7fa514b4d50980d518002c65c6a as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/98edb7fa514b4d50980d518002c65c6a 2024-12-15T04:38:31,151 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/98edb7fa514b4d50980d518002c65c6a, entries=150, sequenceid=460, filesize=12.0 K 2024-12-15T04:38:31,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/f537d129df7d4f79be7ba21f5f69e6bb as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f537d129df7d4f79be7ba21f5f69e6bb 2024-12-15T04:38:31,156 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f537d129df7d4f79be7ba21f5f69e6bb, entries=150, sequenceid=460, filesize=12.0 K 2024-12-15T04:38:31,157 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1266ms, sequenceid=460, compaction requested=true 2024-12-15T04:38:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:31,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-12-15T04:38:31,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-12-15T04:38:31,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-15T04:38:31,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8770 sec 2024-12-15T04:38:31,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.8820 sec 2024-12-15T04:38:31,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:31,247 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:38:31,248 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bbb5d8a to 127.0.0.1:55935 2024-12-15T04:38:31,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:31,248 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:31,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:31,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:31,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:31,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:31,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:31,257 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9932e6e94617480383da05aaca4d6c57 is 50, key is test_row_0/A:col10/1734237510132/Put/seqid=0 2024-12-15T04:38:31,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741938_1114 (size=12301) 2024-12-15T04:38:31,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-12-15T04:38:31,385 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-12-15T04:38:31,664 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=485 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9932e6e94617480383da05aaca4d6c57 2024-12-15T04:38:31,693 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f34f0eee3f94496f91a33bf898b2105a is 50, key is test_row_0/B:col10/1734237510132/Put/seqid=0 2024-12-15T04:38:31,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741939_1115 (size=12301) 2024-12-15T04:38:32,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f34f0eee3f94496f91a33bf898b2105a 2024-12-15T04:38:32,118 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/3710a4e96d6e4f59a98c9a7e3668164a is 50, key is test_row_0/C:col10/1734237510132/Put/seqid=0 2024-12-15T04:38:32,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741940_1116 (size=12301) 2024-12-15T04:38:32,399 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12885408 to 127.0.0.1:55935 2024-12-15T04:38:32,400 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:32,429 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72b32f98 to 127.0.0.1:55935 2024-12-15T04:38:32,429 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:32,524 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=485 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/3710a4e96d6e4f59a98c9a7e3668164a 2024-12-15T04:38:32,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/9932e6e94617480383da05aaca4d6c57 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9932e6e94617480383da05aaca4d6c57 2024-12-15T04:38:32,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9932e6e94617480383da05aaca4d6c57, entries=150, sequenceid=485, filesize=12.0 K 2024-12-15T04:38:32,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/f34f0eee3f94496f91a33bf898b2105a as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f34f0eee3f94496f91a33bf898b2105a 2024-12-15T04:38:32,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f34f0eee3f94496f91a33bf898b2105a, entries=150, sequenceid=485, filesize=12.0 K 2024-12-15T04:38:32,548 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/3710a4e96d6e4f59a98c9a7e3668164a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/3710a4e96d6e4f59a98c9a7e3668164a 2024-12-15T04:38:32,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/3710a4e96d6e4f59a98c9a7e3668164a, entries=150, sequenceid=485, filesize=12.0 K 2024-12-15T04:38:32,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=13.42 KB/13740 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1306ms, sequenceid=485, compaction requested=true 2024-12-15T04:38:32,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:32,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:32,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:32,554 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:32,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:32,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:32,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 6443b0fc7191a86cb86de2a8c7e17f47:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:32,554 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:32,554 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:32,555 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:32,555 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/A is initiating minor compaction (all files) 2024-12-15T04:38:32,555 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:32,555 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/A in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:32,555 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/B is initiating minor compaction (all files) 2024-12-15T04:38:32,555 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/B in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:32,555 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/69d57b134b224bc9be82fa7a4b0d0ebe, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/1b15fd5e439348819053b48ced9cc7f4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5b566afbe1dd4fdea5afafdd786a65ed, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9932e6e94617480383da05aaca4d6c57] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=49.0 K 2024-12-15T04:38:32,555 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7b0cc8b5534349109ef63f0255e12fe6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4e059809a3ba4b278d0ffdf749065ff3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/98edb7fa514b4d50980d518002c65c6a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f34f0eee3f94496f91a33bf898b2105a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=49.0 K 2024-12-15T04:38:32,556 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 69d57b134b224bc9be82fa7a4b0d0ebe, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=421, 
earliestPutTs=1734237507170 2024-12-15T04:38:32,556 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b0cc8b5534349109ef63f0255e12fe6, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734237507170 2024-12-15T04:38:32,556 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b15fd5e439348819053b48ced9cc7f4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1734237507830 2024-12-15T04:38:32,556 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e059809a3ba4b278d0ffdf749065ff3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1734237507830 2024-12-15T04:38:32,556 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b566afbe1dd4fdea5afafdd786a65ed, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734237508964 2024-12-15T04:38:32,556 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 98edb7fa514b4d50980d518002c65c6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734237508964 2024-12-15T04:38:32,556 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9932e6e94617480383da05aaca4d6c57, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=485, earliestPutTs=1734237510130 2024-12-15T04:38:32,557 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting f34f0eee3f94496f91a33bf898b2105a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=485, earliestPutTs=1734237510130 2024-12-15T04:38:32,565 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#A#compaction#103 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:32,565 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#B#compaction#102 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:32,565 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/20db8e85fc5f4ca385ffe5aa69723653 is 50, key is test_row_0/A:col10/1734237510132/Put/seqid=0 2024-12-15T04:38:32,565 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4fabf2a112194c27bcbfdca1328f0a8a is 50, key is test_row_0/B:col10/1734237510132/Put/seqid=0 2024-12-15T04:38:32,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741941_1117 (size=13459) 2024-12-15T04:38:32,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741942_1118 (size=13459) 2024-12-15T04:38:32,984 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/20db8e85fc5f4ca385ffe5aa69723653 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/20db8e85fc5f4ca385ffe5aa69723653 2024-12-15T04:38:32,984 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/4fabf2a112194c27bcbfdca1328f0a8a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4fabf2a112194c27bcbfdca1328f0a8a 2024-12-15T04:38:32,989 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/B of 6443b0fc7191a86cb86de2a8c7e17f47 into 4fabf2a112194c27bcbfdca1328f0a8a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:32,989 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/A of 6443b0fc7191a86cb86de2a8c7e17f47 into 20db8e85fc5f4ca385ffe5aa69723653(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:32,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:32,990 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:32,990 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/B, priority=12, startTime=1734237512554; duration=0sec 2024-12-15T04:38:32,990 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/A, priority=12, startTime=1734237512553; duration=0sec 2024-12-15T04:38:32,990 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:32,990 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:A 2024-12-15T04:38:32,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:32,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:B 2024-12-15T04:38:32,990 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:32,991 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50226 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:32,991 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 6443b0fc7191a86cb86de2a8c7e17f47/C is initiating minor compaction (all files) 2024-12-15T04:38:32,991 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 6443b0fc7191a86cb86de2a8c7e17f47/C in TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
2024-12-15T04:38:32,992 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c0448110b3c7489fa9254541ae00c5b2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6155440c466e404d8235f3eae27adde0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f537d129df7d4f79be7ba21f5f69e6bb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/3710a4e96d6e4f59a98c9a7e3668164a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp, totalSize=49.0 K 2024-12-15T04:38:32,992 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0448110b3c7489fa9254541ae00c5b2, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=421, earliestPutTs=1734237507170 2024-12-15T04:38:32,992 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6155440c466e404d8235f3eae27adde0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=445, earliestPutTs=1734237507830 2024-12-15T04:38:32,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f537d129df7d4f79be7ba21f5f69e6bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1734237508964 2024-12-15T04:38:32,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3710a4e96d6e4f59a98c9a7e3668164a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=485, earliestPutTs=1734237510130 2024-12-15T04:38:33,002 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 6443b0fc7191a86cb86de2a8c7e17f47#C#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:33,002 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c802f2e0625d4e398ebecb4c31d40bc6 is 50, key is test_row_0/C:col10/1734237510132/Put/seqid=0 2024-12-15T04:38:33,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741943_1119 (size=13459) 2024-12-15T04:38:33,417 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c802f2e0625d4e398ebecb4c31d40bc6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c802f2e0625d4e398ebecb4c31d40bc6 2024-12-15T04:38:33,424 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 6443b0fc7191a86cb86de2a8c7e17f47/C of 6443b0fc7191a86cb86de2a8c7e17f47 into c802f2e0625d4e398ebecb4c31d40bc6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:33,424 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:33,424 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47., storeName=6443b0fc7191a86cb86de2a8c7e17f47/C, priority=12, startTime=1734237512554; duration=0sec 2024-12-15T04:38:33,425 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:33,425 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 6443b0fc7191a86cb86de2a8c7e17f47:C 2024-12-15T04:38:35,857 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18603bb9 to 127.0.0.1:55935 2024-12-15T04:38:35,858 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:35,884 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x04977266 to 127.0.0.1:55935 2024-12-15T04:38:35,884 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 35 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 36 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 190 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8301 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7956 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3509 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10526 rows 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3529 2024-12-15T04:38:35,884 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10587 rows 2024-12-15T04:38:35,884 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T04:38:35,884 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e98ea32 to 127.0.0.1:55935 2024-12-15T04:38:35,884 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:38:35,887 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-15T04:38:35,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-15T04:38:35,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:35,897 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237515897"}]},"ts":"1734237515897"} 2024-12-15T04:38:35,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-15T04:38:35,899 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-15T04:38:35,906 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-15T04:38:35,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:38:35,912 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=6443b0fc7191a86cb86de2a8c7e17f47, UNASSIGN}] 2024-12-15T04:38:35,913 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=28, ppid=27, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=6443b0fc7191a86cb86de2a8c7e17f47, UNASSIGN 2024-12-15T04:38:35,914 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=6443b0fc7191a86cb86de2a8c7e17f47, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:35,915 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:38:35,915 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; CloseRegionProcedure 6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:38:35,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-15T04:38:36,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:36,075 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(124): Close 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:36,076 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:38:36,078 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1681): Closing 6443b0fc7191a86cb86de2a8c7e17f47, disabling compactions & flushes 2024-12-15T04:38:36,078 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:36,078 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 2024-12-15T04:38:36,078 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. after waiting 0 ms 2024-12-15T04:38:36,078 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
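[Editor's note] The entries above show the master executing DisableTableProcedure (pid=26) with its CloseTableRegionsProcedure, TransitRegionStateProcedure and CloseRegionProcedure children, while the RPC handler repeatedly logs "Checking to see if procedure is done pid=26" on behalf of the waiting client. The client side of that exchange is a single blocking Admin call. The sketch below is a minimal, hypothetical reproduction of it (table name copied from the log, connection settings assumed to come from an hbase-site.xml on the classpath); it is not the test tool's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml / ZooKeeper quorum
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Submits a DisableTableProcedure on the master and blocks until it finishes;
      // the repeated "Checking to see if procedure is done" lines are this wait.
      admin.disableTable(table);
      System.out.println("disabled: " + admin.isTableDisabled(table));
    }
  }
}

Once disableTable returns, every region of the table has been unassigned and closed, which is exactly the flush-and-close sequence the region server logs next.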
2024-12-15T04:38:36,079 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(2837): Flushing 6443b0fc7191a86cb86de2a8c7e17f47 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-15T04:38:36,079 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=A 2024-12-15T04:38:36,080 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:36,080 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=B 2024-12-15T04:38:36,080 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:36,080 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 6443b0fc7191a86cb86de2a8c7e17f47, store=C 2024-12-15T04:38:36,081 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:36,086 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/6e474f798b3943238d89bbd02363a1ca is 50, key is test_row_0/A:col10/1734237512396/Put/seqid=0 2024-12-15T04:38:36,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741944_1120 (size=12301) 2024-12-15T04:38:36,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-15T04:38:36,492 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/6e474f798b3943238d89bbd02363a1ca 2024-12-15T04:38:36,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-15T04:38:36,509 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/56129c758a8446e3b3a603e1b75c45ec is 50, key is test_row_0/B:col10/1734237512396/Put/seqid=0 2024-12-15T04:38:36,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741945_1121 (size=12301) 2024-12-15T04:38:36,915 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 
{event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/56129c758a8446e3b3a603e1b75c45ec 2024-12-15T04:38:36,929 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c01a46934840412da5be27d3248ae0da is 50, key is test_row_0/C:col10/1734237512396/Put/seqid=0 2024-12-15T04:38:36,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741946_1122 (size=12301) 2024-12-15T04:38:37,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-15T04:38:37,334 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=495 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c01a46934840412da5be27d3248ae0da 2024-12-15T04:38:37,346 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/A/6e474f798b3943238d89bbd02363a1ca as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6e474f798b3943238d89bbd02363a1ca 2024-12-15T04:38:37,352 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6e474f798b3943238d89bbd02363a1ca, entries=150, sequenceid=495, filesize=12.0 K 2024-12-15T04:38:37,353 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/B/56129c758a8446e3b3a603e1b75c45ec as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/56129c758a8446e3b3a603e1b75c45ec 2024-12-15T04:38:37,359 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/56129c758a8446e3b3a603e1b75c45ec, entries=150, sequenceid=495, filesize=12.0 K 2024-12-15T04:38:37,361 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/.tmp/C/c01a46934840412da5be27d3248ae0da as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c01a46934840412da5be27d3248ae0da 2024-12-15T04:38:37,366 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c01a46934840412da5be27d3248ae0da, entries=150, sequenceid=495, filesize=12.0 K 2024-12-15T04:38:37,367 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 6443b0fc7191a86cb86de2a8c7e17f47 in 1289ms, sequenceid=495, compaction requested=false 2024-12-15T04:38:37,368 DEBUG [StoreCloser-TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/ca561e8d1c764e059f9537a5e5fae500, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fd1e2eeb276e415295e07f9c004adc1d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/205f75d217004b1c95064f134e018b82, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/789043c3a72f4ad49eab4c575e4b7fa0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b4384aa798334fccaf8fb47d5ee833ce, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e980cff43fd8468ca170639eb73d3882, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/cf85e948db6340eb89be499dfdd41b36, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/772c13b6a93d4b1682a9b1acd358f435, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9e9babc9515c45abb5a5c5b5da551edc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/30a1300b078c4f75aef4d4179507b1d1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/149af2119d0c4937ad9458c00092494f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9804398af3c84c4fb673d0a0fab071ce, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6a63db6375324632b788ba68b136872f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/0fdf702de5b347429651e45bf6146446, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/52018197caba41c1b9c4198dbb27a650, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/655f6b0a7c224bc6bc468a228c13589a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/94ff6d0cdfa54b03922c6f972dc2f43f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/41b9606eeec04c6196b7c0026df92063, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5cf74a1384684e4e8f1af1b64b15442a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fc9dfd84d574492b9bdf27861b2e81ca, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/93bd2e6a761d4d9d8feceeb40611806c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4a41a1994d9c4e6b99b7344fdc7d6c27, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b98e820c1c1d41548f30b3d95236fb13, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4b49f35e4fe048f19856fd13b6580941, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e85dd22b8eb4494eac3eb78e36c48b47, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/8b4f7e41c27c4f96bd42c4189a551516, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9cbec089649741f38508a30a1b3febb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4aacd127f6d147acbfeeca93b8813560, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/95e5867c533e44c69da4918b1ff078ac, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4845fb9eb1db45719b1c467f838d20df, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/69d57b134b224bc9be82fa7a4b0d0ebe, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/1b15fd5e439348819053b48ced9cc7f4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5b566afbe1dd4fdea5afafdd786a65ed, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9932e6e94617480383da05aaca4d6c57] to archive 2024-12-15T04:38:37,371 DEBUG [StoreCloser-TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-15T04:38:37,378 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b4384aa798334fccaf8fb47d5ee833ce to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b4384aa798334fccaf8fb47d5ee833ce 2024-12-15T04:38:37,378 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/cf85e948db6340eb89be499dfdd41b36 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/cf85e948db6340eb89be499dfdd41b36 2024-12-15T04:38:37,379 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/ca561e8d1c764e059f9537a5e5fae500 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/ca561e8d1c764e059f9537a5e5fae500 2024-12-15T04:38:37,379 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fd1e2eeb276e415295e07f9c004adc1d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fd1e2eeb276e415295e07f9c004adc1d 2024-12-15T04:38:37,379 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/772c13b6a93d4b1682a9b1acd358f435 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/772c13b6a93d4b1682a9b1acd358f435 2024-12-15T04:38:37,379 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e980cff43fd8468ca170639eb73d3882 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e980cff43fd8468ca170639eb73d3882 2024-12-15T04:38:37,379 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/205f75d217004b1c95064f134e018b82 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/205f75d217004b1c95064f134e018b82 2024-12-15T04:38:37,379 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/789043c3a72f4ad49eab4c575e4b7fa0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/789043c3a72f4ad49eab4c575e4b7fa0 2024-12-15T04:38:37,382 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/30a1300b078c4f75aef4d4179507b1d1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/30a1300b078c4f75aef4d4179507b1d1 2024-12-15T04:38:37,382 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/149af2119d0c4937ad9458c00092494f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/149af2119d0c4937ad9458c00092494f 2024-12-15T04:38:37,382 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9e9babc9515c45abb5a5c5b5da551edc to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9e9babc9515c45abb5a5c5b5da551edc 2024-12-15T04:38:37,382 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9804398af3c84c4fb673d0a0fab071ce to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9804398af3c84c4fb673d0a0fab071ce 2024-12-15T04:38:37,382 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/52018197caba41c1b9c4198dbb27a650 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/52018197caba41c1b9c4198dbb27a650 2024-12-15T04:38:37,382 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/655f6b0a7c224bc6bc468a228c13589a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/655f6b0a7c224bc6bc468a228c13589a 2024-12-15T04:38:37,383 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/0fdf702de5b347429651e45bf6146446 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/0fdf702de5b347429651e45bf6146446 2024-12-15T04:38:37,383 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6a63db6375324632b788ba68b136872f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6a63db6375324632b788ba68b136872f 2024-12-15T04:38:37,386 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/94ff6d0cdfa54b03922c6f972dc2f43f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/94ff6d0cdfa54b03922c6f972dc2f43f 2024-12-15T04:38:37,387 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b98e820c1c1d41548f30b3d95236fb13 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/b98e820c1c1d41548f30b3d95236fb13 2024-12-15T04:38:37,387 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4a41a1994d9c4e6b99b7344fdc7d6c27 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4a41a1994d9c4e6b99b7344fdc7d6c27 2024-12-15T04:38:37,387 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/41b9606eeec04c6196b7c0026df92063 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/41b9606eeec04c6196b7c0026df92063 2024-12-15T04:38:37,387 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fc9dfd84d574492b9bdf27861b2e81ca to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/fc9dfd84d574492b9bdf27861b2e81ca 2024-12-15T04:38:37,387 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/93bd2e6a761d4d9d8feceeb40611806c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/93bd2e6a761d4d9d8feceeb40611806c 2024-12-15T04:38:37,387 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4b49f35e4fe048f19856fd13b6580941 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4b49f35e4fe048f19856fd13b6580941 2024-12-15T04:38:37,388 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e85dd22b8eb4494eac3eb78e36c48b47 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/e85dd22b8eb4494eac3eb78e36c48b47 2024-12-15T04:38:37,388 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5cf74a1384684e4e8f1af1b64b15442a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5cf74a1384684e4e8f1af1b64b15442a 2024-12-15T04:38:37,389 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9cbec089649741f38508a30a1b3febb5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9cbec089649741f38508a30a1b3febb5 2024-12-15T04:38:37,389 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/8b4f7e41c27c4f96bd42c4189a551516 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/8b4f7e41c27c4f96bd42c4189a551516 2024-12-15T04:38:37,389 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4aacd127f6d147acbfeeca93b8813560 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4aacd127f6d147acbfeeca93b8813560 2024-12-15T04:38:37,389 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/95e5867c533e44c69da4918b1ff078ac to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/95e5867c533e44c69da4918b1ff078ac 2024-12-15T04:38:37,390 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4845fb9eb1db45719b1c467f838d20df to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/4845fb9eb1db45719b1c467f838d20df 2024-12-15T04:38:37,390 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/1b15fd5e439348819053b48ced9cc7f4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/1b15fd5e439348819053b48ced9cc7f4 2024-12-15T04:38:37,390 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5b566afbe1dd4fdea5afafdd786a65ed to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/5b566afbe1dd4fdea5afafdd786a65ed 2024-12-15T04:38:37,390 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9932e6e94617480383da05aaca4d6c57 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/9932e6e94617480383da05aaca4d6c57 2024-12-15T04:38:37,391 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/69d57b134b224bc9be82fa7a4b0d0ebe to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/69d57b134b224bc9be82fa7a4b0d0ebe 2024-12-15T04:38:37,404 DEBUG [StoreCloser-TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c23dd2cd6e544ed884327e48a89722e4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/b896ef23c0f14231aa94eb3e073fae98, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4c0ea44de593420487517b88784c9de5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e1c54d270b704035a03946fbfb5fb0e0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c31054eefd084daca88aee8a4241a028, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7fa0aa66aa014304ad0cff6ecc5bce1d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2d1656dd8af54a0e8bffdff913eccb59, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a3bf88afbaaf49ef95da48667c661ed6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a7fb5a8637ae42b1be1dc256d533c383, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/3c94270c922144999d0fbcf206e8bbce, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e3752d5b939244baa78a56807d567f74, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/cb9da5981c904e5492c6625b9601321b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/32b223adf2af450b9e3b06d1ef466618, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e504f21b109c461aaaba8324622c5499, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/8be34ce6dc05476e9ff3d757a7d7d34c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f8160a3872d7427182507fd059c7ec13, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9f2b0b387a134143bcc26c9c7fd54e86, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/601cb9debaca474aa5bc89a57fad81f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/0983e0f198de41658ef78c6fc4c4ff21, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d80f4abc4fcc4e5cad6a062c39206286, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/63a4b7833de74f449632769c68ab9edf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d62df976dead403ba19d0c1b34c2ac22, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/fbf9b68fad664b5cb4a11ad9b2890304, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9e2ad297aee94eccade8b812ef371435, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2fc2489c014545d7adf0a0db71e6147d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/eaf56634106f4367ab72f1d3b17f09b4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a1599cc2e4324873801f16dc30ce0e18, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e45c2e705f544371893ec59ebb2b5e44, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f10ab9e0907e426fafa5f1b7da78ec7c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7b0cc8b5534349109ef63f0255e12fe6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c49dfce6f08c4db197b2843b0c12d8c8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4e059809a3ba4b278d0ffdf749065ff3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/98edb7fa514b4d50980d518002c65c6a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f34f0eee3f94496f91a33bf898b2105a] to archive 2024-12-15T04:38:37,405 DEBUG [StoreCloser-TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
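[Editor's note] The StoreCloser entries hand each list of compacted store files to HFileArchiver, which relocates them from the region's data directory to the mirrored path under .../archive/. As a rough illustration only (not HBase's HFileArchiver implementation), the effect of each "Archived from FileableStoreFile" line is a rename on the underlying filesystem; the sketch below uses the HDFS root and one B-family file name taken from the listing above.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35921"), conf);

    String root = "/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9";
    String relative = "default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47"
        + "/B/f34f0eee3f94496f91a33bf898b2105a"; // file name taken from the list above

    Path src = new Path(root + "/data/" + relative);
    Path dst = new Path(root + "/archive/data/" + relative);

    fs.mkdirs(dst.getParent());          // ensure the mirrored archive directory exists
    boolean moved = fs.rename(src, dst); // the move logged as "Archived from FileableStoreFile"
    System.out.println("moved=" + moved);
  }
}

Keeping the archive path a mirror of the data path is what lets snapshots and cleaners later resolve which table, region and family an archived HFile belonged to.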
2024-12-15T04:38:37,408 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c23dd2cd6e544ed884327e48a89722e4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c23dd2cd6e544ed884327e48a89722e4 2024-12-15T04:38:37,408 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4c0ea44de593420487517b88784c9de5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4c0ea44de593420487517b88784c9de5 2024-12-15T04:38:37,408 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/b896ef23c0f14231aa94eb3e073fae98 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/b896ef23c0f14231aa94eb3e073fae98 2024-12-15T04:38:37,408 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c31054eefd084daca88aee8a4241a028 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c31054eefd084daca88aee8a4241a028 2024-12-15T04:38:37,408 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a3bf88afbaaf49ef95da48667c661ed6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a3bf88afbaaf49ef95da48667c661ed6 2024-12-15T04:38:37,408 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7fa0aa66aa014304ad0cff6ecc5bce1d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7fa0aa66aa014304ad0cff6ecc5bce1d 2024-12-15T04:38:37,408 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e1c54d270b704035a03946fbfb5fb0e0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e1c54d270b704035a03946fbfb5fb0e0 2024-12-15T04:38:37,409 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2d1656dd8af54a0e8bffdff913eccb59 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2d1656dd8af54a0e8bffdff913eccb59 2024-12-15T04:38:37,410 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a7fb5a8637ae42b1be1dc256d533c383 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a7fb5a8637ae42b1be1dc256d533c383 2024-12-15T04:38:37,410 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/cb9da5981c904e5492c6625b9601321b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/cb9da5981c904e5492c6625b9601321b 2024-12-15T04:38:37,410 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/32b223adf2af450b9e3b06d1ef466618 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/32b223adf2af450b9e3b06d1ef466618 2024-12-15T04:38:37,411 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/3c94270c922144999d0fbcf206e8bbce to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/3c94270c922144999d0fbcf206e8bbce 2024-12-15T04:38:37,411 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e3752d5b939244baa78a56807d567f74 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e3752d5b939244baa78a56807d567f74 2024-12-15T04:38:37,411 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f8160a3872d7427182507fd059c7ec13 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f8160a3872d7427182507fd059c7ec13 2024-12-15T04:38:37,411 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/8be34ce6dc05476e9ff3d757a7d7d34c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/8be34ce6dc05476e9ff3d757a7d7d34c 2024-12-15T04:38:37,412 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e504f21b109c461aaaba8324622c5499 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e504f21b109c461aaaba8324622c5499 2024-12-15T04:38:37,413 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9f2b0b387a134143bcc26c9c7fd54e86 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9f2b0b387a134143bcc26c9c7fd54e86 2024-12-15T04:38:37,413 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/601cb9debaca474aa5bc89a57fad81f1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/601cb9debaca474aa5bc89a57fad81f1 2024-12-15T04:38:37,414 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/0983e0f198de41658ef78c6fc4c4ff21 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/0983e0f198de41658ef78c6fc4c4ff21 2024-12-15T04:38:37,414 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/63a4b7833de74f449632769c68ab9edf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/63a4b7833de74f449632769c68ab9edf 2024-12-15T04:38:37,415 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d80f4abc4fcc4e5cad6a062c39206286 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d80f4abc4fcc4e5cad6a062c39206286 2024-12-15T04:38:37,415 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d62df976dead403ba19d0c1b34c2ac22 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/d62df976dead403ba19d0c1b34c2ac22 2024-12-15T04:38:37,416 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/fbf9b68fad664b5cb4a11ad9b2890304 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/fbf9b68fad664b5cb4a11ad9b2890304 2024-12-15T04:38:37,417 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2fc2489c014545d7adf0a0db71e6147d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/2fc2489c014545d7adf0a0db71e6147d 2024-12-15T04:38:37,417 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9e2ad297aee94eccade8b812ef371435 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/9e2ad297aee94eccade8b812ef371435 2024-12-15T04:38:37,418 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7b0cc8b5534349109ef63f0255e12fe6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/7b0cc8b5534349109ef63f0255e12fe6 2024-12-15T04:38:37,418 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a1599cc2e4324873801f16dc30ce0e18 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/a1599cc2e4324873801f16dc30ce0e18 2024-12-15T04:38:37,418 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c49dfce6f08c4db197b2843b0c12d8c8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/c49dfce6f08c4db197b2843b0c12d8c8 2024-12-15T04:38:37,418 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/eaf56634106f4367ab72f1d3b17f09b4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/eaf56634106f4367ab72f1d3b17f09b4 2024-12-15T04:38:37,418 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e45c2e705f544371893ec59ebb2b5e44 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/e45c2e705f544371893ec59ebb2b5e44 2024-12-15T04:38:37,418 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f10ab9e0907e426fafa5f1b7da78ec7c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f10ab9e0907e426fafa5f1b7da78ec7c 2024-12-15T04:38:37,419 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4e059809a3ba4b278d0ffdf749065ff3 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4e059809a3ba4b278d0ffdf749065ff3 2024-12-15T04:38:37,419 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/98edb7fa514b4d50980d518002c65c6a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/98edb7fa514b4d50980d518002c65c6a 2024-12-15T04:38:37,420 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f34f0eee3f94496f91a33bf898b2105a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/f34f0eee3f94496f91a33bf898b2105a 2024-12-15T04:38:37,421 DEBUG [StoreCloser-TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/11c541a1a63c41d79e75aae1ab212f71, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/90b6c7b01bf048e98fdb278de48a70b5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9b8a342e34fd4009ad69f6711bf453cd, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/15fdac4b69df4f29b53f3f9dd4156fa6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ea65fce7bfcf49e59d3a2289b9ed6e8b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/94c8543480a1468085434c6247f758d1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ec4f745b9a5e4071b5969e05bd149166, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7c6b1b10806c42c6a37299fed6b10240, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/067918eec2f4425d8c6d4047d721e827, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a24ff2cf0d324b20a34e0c11d684415f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a5ce8476c9e8447f90faf66a9729a5e8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/1959a4b6f9774878b5889b5716426007, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/091f586ce84a4b54a36949b7ac6939db, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/b88127cd3e194d39bc32567f2e436a2b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6b7beb493bc5466aaa6bcdd63c4e9ac6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/fd65579037744fd5b26da1a50f75e059, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9406626a5b624ed18decaecaaae66562, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a7a40bc87ad04d7d979f789beabc91c6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/e9a0a99e729644cead3ea33cb0bf88d6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f52791f234064173b42f5dd426d1ca03, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/cae6591611b148ebaef0d94a391fac9d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c1c349c710584588aac03e60608ac03b, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/91df86347d4f42e6bcd22bdecefe6428, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9c1d52649b924f78a9adf2a14e0bfc46, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/48553b3f609c4321b8f63f5a91ef37e0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7367e35a70b64a46a462b285653c2fe7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9674f49735494b8f810c5bc615595cbf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6e02323714824a1e866df6155eb3e44c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f9394a74a00a47dfbfeb0dc2e8137d23, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c0448110b3c7489fa9254541ae00c5b2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f3826e834efe47f886afb177a7ff54b2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6155440c466e404d8235f3eae27adde0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f537d129df7d4f79be7ba21f5f69e6bb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/3710a4e96d6e4f59a98c9a7e3668164a] to archive 2024-12-15T04:38:37,422 DEBUG [StoreCloser-TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
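The store-file moves logged above all follow one path rule: a file under the region's data directory is relocated to the same relative path under the cluster's archive directory. The plain-Java sketch below only reproduces that mapping as an illustration; it is not HBase's HFileArchiver implementation, and the class and method names here are hypothetical.

import java.net.URI;

public class ArchivePathSketch {
    // Maps <root>/data/default/<table>/<region>/<family>/<file>
    // to   <root>/archive/data/default/<table>/<region>/<family>/<file>,
    // mirroring the source/destination pairs visible in the log above.
    static URI toArchiveLocation(URI rootDir, URI storeFile) {
        String root = rootDir.toString();
        if (!root.endsWith("/")) {
            root = root + "/";
        }
        String src = storeFile.toString();
        if (!src.startsWith(root + "data/")) {
            throw new IllegalArgumentException("not under " + root + "data/: " + src);
        }
        // Keep the path relative to the root and re-anchor it under archive/.
        return URI.create(root + "archive/" + src.substring(root.length()));
    }

    public static void main(String[] args) {
        URI root = URI.create(
            "hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9");
        URI src = URI.create(root
            + "/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9b8a342e34fd4009ad69f6711bf453cd");
        // Prints the archive/data/... URI matching the destination shown in the log.
        System.out.println(toArchiveLocation(root, src));
    }
}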
2024-12-15T04:38:37,425 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9b8a342e34fd4009ad69f6711bf453cd to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9b8a342e34fd4009ad69f6711bf453cd 2024-12-15T04:38:37,425 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ea65fce7bfcf49e59d3a2289b9ed6e8b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ea65fce7bfcf49e59d3a2289b9ed6e8b 2024-12-15T04:38:37,425 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/94c8543480a1468085434c6247f758d1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/94c8543480a1468085434c6247f758d1 2024-12-15T04:38:37,425 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/15fdac4b69df4f29b53f3f9dd4156fa6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/15fdac4b69df4f29b53f3f9dd4156fa6 2024-12-15T04:38:37,425 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/90b6c7b01bf048e98fdb278de48a70b5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/90b6c7b01bf048e98fdb278de48a70b5 2024-12-15T04:38:37,426 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7c6b1b10806c42c6a37299fed6b10240 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7c6b1b10806c42c6a37299fed6b10240 2024-12-15T04:38:37,426 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/11c541a1a63c41d79e75aae1ab212f71 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/11c541a1a63c41d79e75aae1ab212f71 2024-12-15T04:38:37,427 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a24ff2cf0d324b20a34e0c11d684415f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a24ff2cf0d324b20a34e0c11d684415f 2024-12-15T04:38:37,428 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a5ce8476c9e8447f90faf66a9729a5e8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a5ce8476c9e8447f90faf66a9729a5e8 2024-12-15T04:38:37,428 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ec4f745b9a5e4071b5969e05bd149166 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/ec4f745b9a5e4071b5969e05bd149166 2024-12-15T04:38:37,428 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/067918eec2f4425d8c6d4047d721e827 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/067918eec2f4425d8c6d4047d721e827 2024-12-15T04:38:37,429 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/b88127cd3e194d39bc32567f2e436a2b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/b88127cd3e194d39bc32567f2e436a2b 2024-12-15T04:38:37,429 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/1959a4b6f9774878b5889b5716426007 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/1959a4b6f9774878b5889b5716426007 2024-12-15T04:38:37,429 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/091f586ce84a4b54a36949b7ac6939db to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/091f586ce84a4b54a36949b7ac6939db 2024-12-15T04:38:37,430 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6b7beb493bc5466aaa6bcdd63c4e9ac6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6b7beb493bc5466aaa6bcdd63c4e9ac6 2024-12-15T04:38:37,430 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/e9a0a99e729644cead3ea33cb0bf88d6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/e9a0a99e729644cead3ea33cb0bf88d6 2024-12-15T04:38:37,431 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/fd65579037744fd5b26da1a50f75e059 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/fd65579037744fd5b26da1a50f75e059 2024-12-15T04:38:37,431 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f52791f234064173b42f5dd426d1ca03 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f52791f234064173b42f5dd426d1ca03 2024-12-15T04:38:37,431 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9406626a5b624ed18decaecaaae66562 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9406626a5b624ed18decaecaaae66562 2024-12-15T04:38:37,432 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c1c349c710584588aac03e60608ac03b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c1c349c710584588aac03e60608ac03b 2024-12-15T04:38:37,432 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/cae6591611b148ebaef0d94a391fac9d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/cae6591611b148ebaef0d94a391fac9d 2024-12-15T04:38:37,433 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9c1d52649b924f78a9adf2a14e0bfc46 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9c1d52649b924f78a9adf2a14e0bfc46 2024-12-15T04:38:37,433 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/48553b3f609c4321b8f63f5a91ef37e0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/48553b3f609c4321b8f63f5a91ef37e0 2024-12-15T04:38:37,433 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/91df86347d4f42e6bcd22bdecefe6428 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/91df86347d4f42e6bcd22bdecefe6428 2024-12-15T04:38:37,434 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7367e35a70b64a46a462b285653c2fe7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/7367e35a70b64a46a462b285653c2fe7 2024-12-15T04:38:37,434 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9674f49735494b8f810c5bc615595cbf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/9674f49735494b8f810c5bc615595cbf 2024-12-15T04:38:37,434 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a7a40bc87ad04d7d979f789beabc91c6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/a7a40bc87ad04d7d979f789beabc91c6 2024-12-15T04:38:37,435 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f9394a74a00a47dfbfeb0dc2e8137d23 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f9394a74a00a47dfbfeb0dc2e8137d23 2024-12-15T04:38:37,435 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6e02323714824a1e866df6155eb3e44c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6e02323714824a1e866df6155eb3e44c 2024-12-15T04:38:37,436 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c0448110b3c7489fa9254541ae00c5b2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c0448110b3c7489fa9254541ae00c5b2 2024-12-15T04:38:37,436 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f3826e834efe47f886afb177a7ff54b2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f3826e834efe47f886afb177a7ff54b2 2024-12-15T04:38:37,436 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6155440c466e404d8235f3eae27adde0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/6155440c466e404d8235f3eae27adde0 2024-12-15T04:38:37,436 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f537d129df7d4f79be7ba21f5f69e6bb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/f537d129df7d4f79be7ba21f5f69e6bb 2024-12-15T04:38:37,436 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/3710a4e96d6e4f59a98c9a7e3668164a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/3710a4e96d6e4f59a98c9a7e3668164a 2024-12-15T04:38:37,443 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/recovered.edits/498.seqid, newMaxSeqId=498, maxSeqId=1 2024-12-15T04:38:37,445 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47. 
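On close, the region writes a marker file under recovered.edits/ whose name carries the new maximum sequence id (498.seqid above, with newMaxSeqId=498). The helper below only illustrates that file-naming convention; it is a hypothetical sketch, not the WALSplitUtil code that produced the log line.

public class SeqIdMarkerSketch {
    // Extracts the sequence id encoded in a marker file name such as "498.seqid".
    static long seqIdFromMarkerName(String fileName) {
        String suffix = ".seqid";
        if (!fileName.endsWith(suffix)) {
            throw new IllegalArgumentException("not a seqid marker: " + fileName);
        }
        return Long.parseLong(fileName.substring(0, fileName.length() - suffix.length()));
    }

    public static void main(String[] args) {
        // The close above logged newMaxSeqId=498 and wrote recovered.edits/498.seqid.
        System.out.println(seqIdFromMarkerName("498.seqid")); // 498
    }
}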
2024-12-15T04:38:37,445 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] regionserver.HRegion(1635): Region close journal for 6443b0fc7191a86cb86de2a8c7e17f47: 2024-12-15T04:38:37,447 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=29}] handler.UnassignRegionHandler(170): Closed 6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:37,448 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=28 updating hbase:meta row=6443b0fc7191a86cb86de2a8c7e17f47, regionState=CLOSED 2024-12-15T04:38:37,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-12-15T04:38:37,451 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; CloseRegionProcedure 6443b0fc7191a86cb86de2a8c7e17f47, server=e56de37b85b3,43199,1734237482035 in 1.5340 sec 2024-12-15T04:38:37,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=27 2024-12-15T04:38:37,451 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=27, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=6443b0fc7191a86cb86de2a8c7e17f47, UNASSIGN in 1.5380 sec 2024-12-15T04:38:37,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-15T04:38:37,453 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5450 sec 2024-12-15T04:38:37,454 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237517454"}]},"ts":"1734237517454"} 2024-12-15T04:38:37,456 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-15T04:38:37,489 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-15T04:38:37,492 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5980 sec 2024-12-15T04:38:37,919 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-15T04:38:37,921 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40990, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-15T04:38:38,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-12-15T04:38:38,005 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-12-15T04:38:38,009 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-15T04:38:38,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:38,017 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=30, 
state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:38,020 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=30, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:38,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-15T04:38:38,023 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:38,028 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/recovered.edits] 2024-12-15T04:38:38,032 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/20db8e85fc5f4ca385ffe5aa69723653 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/20db8e85fc5f4ca385ffe5aa69723653 2024-12-15T04:38:38,033 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6e474f798b3943238d89bbd02363a1ca to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/A/6e474f798b3943238d89bbd02363a1ca 2024-12-15T04:38:38,036 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4fabf2a112194c27bcbfdca1328f0a8a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/4fabf2a112194c27bcbfdca1328f0a8a 2024-12-15T04:38:38,036 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/56129c758a8446e3b3a603e1b75c45ec to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/B/56129c758a8446e3b3a603e1b75c45ec 2024-12-15T04:38:38,039 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c01a46934840412da5be27d3248ae0da to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c01a46934840412da5be27d3248ae0da 2024-12-15T04:38:38,039 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c802f2e0625d4e398ebecb4c31d40bc6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/C/c802f2e0625d4e398ebecb4c31d40bc6 2024-12-15T04:38:38,042 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/recovered.edits/498.seqid to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47/recovered.edits/498.seqid 2024-12-15T04:38:38,042 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/6443b0fc7191a86cb86de2a8c7e17f47 2024-12-15T04:38:38,043 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-15T04:38:38,047 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=30, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:38,051 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-15T04:38:38,054 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-15T04:38:38,078 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-15T04:38:38,080 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=30, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:38,080 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-15T04:38:38,080 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734237518080"}]},"ts":"9223372036854775807"} 2024-12-15T04:38:38,083 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-15T04:38:38,083 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 6443b0fc7191a86cb86de2a8c7e17f47, NAME => 'TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47.', STARTKEY => '', ENDKEY => ''}] 2024-12-15T04:38:38,083 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
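The DeleteTableProcedure entries above and below pass through a sequence of states for pid=30; the enum below simply lists them in the order they are logged in this run (pre-operation, clear FS layout, remove from META, unassign regions, post-operation). It is a reading aid inferred from this log, not the procedure's actual source.

public class DeleteTableStatesSketch {
    // States of DeleteTableProcedure pid=30 in the order they appear in this log.
    enum ObservedState {
        DELETE_TABLE_PRE_OPERATION,
        DELETE_TABLE_CLEAR_FS_LAYOUT,
        DELETE_TABLE_REMOVE_FROM_META,
        DELETE_TABLE_UNASSIGN_REGIONS,
        DELETE_TABLE_POST_OPERATION
    }

    public static void main(String[] args) {
        for (ObservedState s : ObservedState.values()) {
            System.out.println(s);
        }
    }
}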
2024-12-15T04:38:38,083 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734237518083"}]},"ts":"9223372036854775807"} 2024-12-15T04:38:38,086 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-15T04:38:38,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-15T04:38:38,123 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=30, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:38,125 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 112 msec 2024-12-15T04:38:38,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-15T04:38:38,325 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-12-15T04:38:38,339 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMixedAtomicity Thread=245 (was 219) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;e56de37b85b3:43199-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-153955523_22 at /127.0.0.1:33320 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0xf95a041-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=459 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=249 (was 86) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4678 (was 5226) 2024-12-15T04:38:38,349 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=245, OpenFileDescriptor=459, MaxFileDescriptor=1048576, SystemLoadAverage=249, ProcessCount=11, AvailableMemoryMB=4677 2024-12-15T04:38:38,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
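
[Editor's note] The TableDescriptorChecker warning above fires because the test requests a 131072-byte (128 KB) memstore flush size, far below the usual 128 MB default, so regions flush almost constantly while the ACID-guarantees workload runs. A minimal sketch of how such a descriptor-level flush size could be expressed with the HBase 2.x client API (illustrative only; the test harness itself wires this up elsewhere):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Hypothetical snippet: a per-table flush size of 128 KB, small enough to trigger
    // the TableDescriptorChecker warning quoted above (the default is 128 MB).
    TableDescriptor smallFlush = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setMemStoreFlushSize(128 * 1024L)   // 131072 bytes, as in the warning
        .build();
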
2024-12-15T04:38:38,352 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:38:38,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:38,354 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T04:38:38,354 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:38,354 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 31 2024-12-15T04:38:38,355 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T04:38:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-15T04:38:38,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741947_1123 (size=963) 2024-12-15T04:38:38,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-15T04:38:38,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-15T04:38:38,769 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:38:38,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741948_1124 (size=53) 2024-12-15T04:38:38,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-15T04:38:39,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:39,181 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 574a05e47406cea06ff474376a420947, disabling compactions & flushes 2024-12-15T04:38:39,181 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:39,182 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:39,182 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. after waiting 0 ms 2024-12-15T04:38:39,182 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:39,182 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
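
[Editor's note] The create request logged above carries three column families (A, B, C), each with a single version, plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'; the RegionOpenAndInit entries above are CreateTableProcedure laying out the region directory (the region is instantiated and then immediately closed again). A client-side sketch that would produce an equivalent descriptor, assuming the stock HBase 2.x Admin API rather than the test's own helpers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // table-level attribute seen in the logged descriptor
          .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
      for (String family : new String[] {"A", "B", "C"}) {
        tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)               // VERSIONS => '1' in the log
            .build());
      }
      admin.createTable(tdb.build());        // returns once CreateTableProcedure completes
    }
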
2024-12-15T04:38:39,182 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:39,185 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T04:38:39,186 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734237519185"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734237519185"}]},"ts":"1734237519185"} 2024-12-15T04:38:39,188 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T04:38:39,189 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T04:38:39,189 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237519189"}]},"ts":"1734237519189"} 2024-12-15T04:38:39,190 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-15T04:38:39,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, ASSIGN}] 2024-12-15T04:38:39,233 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, ASSIGN 2024-12-15T04:38:39,235 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, ASSIGN; state=OFFLINE, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=false 2024-12-15T04:38:39,386 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:39,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; OpenRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:38:39,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-15T04:38:39,543 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:39,550 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
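
[Editor's note] The Put entries above show the meta bookkeeping done by CreateTableProcedure: the region row in hbase:meta receives info:regioninfo and info:state cells, and the table row receives a table:state cell (ENABLING here, ENABLED once assignment finishes), while the RPC handler keeps polling "is procedure done pid=31" on behalf of the blocked client. A small sketch of reading the table-state cell back, assuming the standard client API and the column names shown in the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // The table-state cell lives in the hbase:meta row named after the table itself;
      // its value is a small serialized TableState protobuf, so it is only printed raw here.
      Result r = meta.get(new Get(Bytes.toBytes("TestAcidGuarantees"))
          .addColumn(Bytes.toBytes("table"), Bytes.toBytes("state")));
      byte[] state = r.getValue(Bytes.toBytes("table"), Bytes.toBytes("state"));
      System.out.println("table:state (raw) = " + Bytes.toStringBinary(state));
    }
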
2024-12-15T04:38:39,550 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:38:39,551 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,551 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:39,552 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,552 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,555 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,557 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:39,558 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 574a05e47406cea06ff474376a420947 columnFamilyName A 2024-12-15T04:38:39,558 DEBUG [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:39,559 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(327): Store=574a05e47406cea06ff474376a420947/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:39,559 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,560 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:39,561 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 574a05e47406cea06ff474376a420947 columnFamilyName B 2024-12-15T04:38:39,561 DEBUG [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:39,562 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(327): Store=574a05e47406cea06ff474376a420947/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:39,562 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,563 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:39,563 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 574a05e47406cea06ff474376a420947 columnFamilyName C 2024-12-15T04:38:39,563 DEBUG [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:39,564 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(327): Store=574a05e47406cea06ff474376a420947/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:39,564 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:39,564 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,565 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,566 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:38:39,568 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 574a05e47406cea06ff474376a420947 2024-12-15T04:38:39,570 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:38:39,570 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 574a05e47406cea06ff474376a420947; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66958270, jitterRate=-0.002244025468826294}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:38:39,571 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:39,572 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., pid=33, masterSystemTime=1734237519542 2024-12-15T04:38:39,574 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:39,574 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
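
[Editor's note] The StoreOpener lines above show each of the three families being backed by a CompactingMemStore with compactor=ADAPTIVE and a 2 MB in-memory flush threshold, which is how the table attribute from the create request materialises at region-open time. Besides the table-level metadata key used by this test, the same policy can also be expressed per column family; a sketch assuming the stock 2.x builder API:

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Per-family equivalent of the table attribute
    // 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' used by this test.
    ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
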
2024-12-15T04:38:39,574 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:39,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-12-15T04:38:39,577 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; OpenRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 in 186 msec 2024-12-15T04:38:39,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-12-15T04:38:39,578 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, ASSIGN in 346 msec 2024-12-15T04:38:39,579 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T04:38:39,579 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237519579"}]},"ts":"1734237519579"} 2024-12-15T04:38:39,580 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-15T04:38:39,615 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=31, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T04:38:39,616 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2630 sec 2024-12-15T04:38:40,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=31 2024-12-15T04:38:40,469 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 31 completed 2024-12-15T04:38:40,472 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x407e6b5c to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6eb305fc 2024-12-15T04:38:40,562 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@245d85d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:40,566 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:40,569 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58150, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:40,572 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T04:38:40,575 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43826, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T04:38:40,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-15T04:38:40,584 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:38:40,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-15T04:38:40,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741949_1125 (size=999) 2024-12-15T04:38:41,009 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-15T04:38:41,009 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-15T04:38:41,014 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:38:41,025 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, REOPEN/MOVE}] 2024-12-15T04:38:41,025 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, REOPEN/MOVE 2024-12-15T04:38:41,026 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,026 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35185 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=CLOSING, location=e56de37b85b3,43199,1734237482035, table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-15T04:38:41,027 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:38:41,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; CloseRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:38:41,179 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,180 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(124): Close 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:38:41,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1681): Closing 574a05e47406cea06ff474376a420947, disabling compactions & flushes 2024-12-15T04:38:41,180 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
after waiting 0 ms 2024-12-15T04:38:41,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,186 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-15T04:38:41,187 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,187 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegion(1635): Region close journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:41,187 WARN [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] regionserver.HRegionServer(3786): Not adding moved region record: 574a05e47406cea06ff474376a420947 to self. 2024-12-15T04:38:41,190 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=37}] handler.UnassignRegionHandler(170): Closed 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,190 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=CLOSED 2024-12-15T04:38:41,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-12-15T04:38:41,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; CloseRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 in 165 msec 2024-12-15T04:38:41,194 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, REOPEN/MOVE; state=CLOSED, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=true 2024-12-15T04:38:41,345 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=38, ppid=36, state=RUNNABLE; OpenRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:38:41,498 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,502 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
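
[Editor's note] The close/reopen sequence above is ReopenTableRegionsProcedure picking up the table modification requested earlier, which turned family 'A' into a MOB family (IS_MOB => 'true' with a MOB threshold of only 4 bytes, so nearly every value goes through the MOB path); the region is closed, its max sequence id persisted to recovered.edits/4.seqid, and then reassigned to the same server (retain=true). A sketch of requesting that kind of change from a client, assuming the 2.x Admin API rather than the whole-descriptor modify the test actually issues:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      ColumnFamilyDescriptor current =
          admin.getDescriptor(tn).getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder
          .newBuilder(current)              // keep family A's existing settings
          .setMobEnabled(true)              // IS_MOB => 'true'
          .setMobThreshold(4L)              // MOB_THRESHOLD => '4'
          .build();
      admin.modifyColumnFamily(tn, mobA);   // master runs the modify + region reopen
    }
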
2024-12-15T04:38:41,503 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7285): Opening region: {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:38:41,503 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,503 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:38:41,504 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7327): checking encryption for 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,504 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(7330): checking classloading for 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,507 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,508 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:41,515 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 574a05e47406cea06ff474376a420947 columnFamilyName A 2024-12-15T04:38:41,517 DEBUG [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:41,517 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(327): Store=574a05e47406cea06ff474376a420947/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:41,518 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,519 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:41,519 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 574a05e47406cea06ff474376a420947 columnFamilyName B 2024-12-15T04:38:41,519 DEBUG [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:41,519 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(327): Store=574a05e47406cea06ff474376a420947/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:41,519 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,520 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:38:41,520 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 574a05e47406cea06ff474376a420947 columnFamilyName C 2024-12-15T04:38:41,520 DEBUG [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:41,521 INFO [StoreOpener-574a05e47406cea06ff474376a420947-1 {}] regionserver.HStore(327): Store=574a05e47406cea06ff474376a420947/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:38:41,521 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,522 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,523 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,525 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:38:41,526 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1085): writing seq id for 574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,527 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1102): Opened 574a05e47406cea06ff474376a420947; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62315350, jitterRate=-0.07142892479896545}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:38:41,531 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegion(1001): Region open journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:41,532 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., pid=38, masterSystemTime=1734237521498 2024-12-15T04:38:41,533 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,533 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=38}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
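
[Editor's note] Once the region is back online the test fans out: each "Connect 0x…" / AbstractRpcClient pair that follows is a separate client Connection with its own ZooKeeper session and RPC channel (presumably one per writer/reader worker in the atomicity test), and a few entries below one of the handlers requests a table flush, which the master executes as a FlushTableProcedure. A client-side sketch of both steps, assuming the standard API rather than the test's own helpers:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    Configuration conf = HBaseConfiguration.create();
    // Every createConnection() call opens its own ZooKeeper session and RPC client,
    // which is what the repeated ReadOnlyZKClient / AbstractRpcClient lines reflect.
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));   // master-side flush of the table
    }
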
2024-12-15T04:38:41,533 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=OPEN, openSeqNum=5, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=38, resume processing ppid=36 2024-12-15T04:38:41,537 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, ppid=36, state=SUCCESS; OpenRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 in 189 msec 2024-12-15T04:38:41,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-15T04:38:41,538 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, REOPEN/MOVE in 513 msec 2024-12-15T04:38:41,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34 2024-12-15T04:38:41,541 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 526 msec 2024-12-15T04:38:41,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 955 msec 2024-12-15T04:38:41,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-12-15T04:38:41,553 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b4bd1ba to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@491ea2ee 2024-12-15T04:38:41,612 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@328f994d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,614 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ebda6ad to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@b44b1e5 2024-12-15T04:38:41,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a9306be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,634 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x505d5ccd to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46114993 2024-12-15T04:38:41,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@769942d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,647 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 
127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9 2024-12-15T04:38:41,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@247c0c93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,658 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78cafade to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@152377d4 2024-12-15T04:38:41,665 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@517ff977, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,666 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1a52344f to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3448d233 2024-12-15T04:38:41,673 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1c7940d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,674 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08ba8425 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7a11164b 2024-12-15T04:38:41,681 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c38ee58, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,683 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7af61386 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a7e1dd 2024-12-15T04:38:41,690 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@630684bf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,691 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x063e87c8 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31a027db 2024-12-15T04:38:41,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66547e2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:38:41,702 DEBUG 
[hconnection-0x1d326d67-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,702 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:41,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees 2024-12-15T04:38:41,703 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:41,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-15T04:38:41,703 DEBUG [hconnection-0x5dd8e4db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,704 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=39, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:41,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:41,705 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,706 DEBUG [hconnection-0x357e1a9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,707 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,710 DEBUG [hconnection-0x32a248f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,711 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,711 DEBUG [hconnection-0x68695b3e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,712 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,715 DEBUG [hconnection-0x4359b297-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,716 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 
574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,717 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58196, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,717 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:38:41,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:41,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:41,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:41,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:41,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:41,718 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:41,719 DEBUG [hconnection-0x1691549f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,720 DEBUG [hconnection-0x2bcf440e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,721 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58208, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,721 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58220, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,722 DEBUG [hconnection-0x5fd11fbd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:38:41,723 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58222, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:38:41,756 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237581752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237581754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,758 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237581756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237581756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237581757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412156a24637f470542e9a71fe133d92af579_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237521716/Put/seqid=0 2024-12-15T04:38:41,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741950_1126 (size=12154) 2024-12-15T04:38:41,786 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:41,790 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412156a24637f470542e9a71fe133d92af579_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412156a24637f470542e9a71fe133d92af579_574a05e47406cea06ff474376a420947 2024-12-15T04:38:41,793 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/cb02d24f863a4bbda083d9c8c800e42f, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:41,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/cb02d24f863a4bbda083d9c8c800e42f is 175, key is test_row_0/A:col10/1734237521716/Put/seqid=0 2024-12-15T04:38:41,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-15T04:38:41,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741951_1127 (size=30955) 2024-12-15T04:38:41,855 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-15T04:38:41,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:41,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:41,856 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:41,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:41,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:41,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237581861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237581859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237581861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237581862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:41,864 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:41,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237581862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-15T04:38:42,010 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-15T04:38:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:42,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237582065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237582066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237582066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237582066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237582067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,163 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,164 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-15T04:38:42,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:42,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,164 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:42,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,230 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/cb02d24f863a4bbda083d9c8c800e42f 2024-12-15T04:38:42,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/20bd25bbf5544a9a8317b04a1465cc34 is 50, key is test_row_0/B:col10/1734237521716/Put/seqid=0 2024-12-15T04:38:42,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741952_1128 (size=12001) 2024-12-15T04:38:42,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/20bd25bbf5544a9a8317b04a1465cc34 2024-12-15T04:38:42,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39 2024-12-15T04:38:42,317 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,318 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-15T04:38:42,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:42,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] handler.RSProcedureHandler(58): pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=40 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=40 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/d0c9bf9b018a4336b6bcb3e98d0b1be1 is 50, key is test_row_0/C:col10/1734237521716/Put/seqid=0 2024-12-15T04:38:42,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741953_1129 (size=12001) 2024-12-15T04:38:42,335 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/d0c9bf9b018a4336b6bcb3e98d0b1be1 2024-12-15T04:38:42,341 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/cb02d24f863a4bbda083d9c8c800e42f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/cb02d24f863a4bbda083d9c8c800e42f 2024-12-15T04:38:42,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/cb02d24f863a4bbda083d9c8c800e42f, entries=150, sequenceid=16, filesize=30.2 K 2024-12-15T04:38:42,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/20bd25bbf5544a9a8317b04a1465cc34 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/20bd25bbf5544a9a8317b04a1465cc34 2024-12-15T04:38:42,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/20bd25bbf5544a9a8317b04a1465cc34, entries=150, sequenceid=16, filesize=11.7 K 2024-12-15T04:38:42,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/d0c9bf9b018a4336b6bcb3e98d0b1be1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d0c9bf9b018a4336b6bcb3e98d0b1be1 2024-12-15T04:38:42,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237582368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,374 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237582368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237582370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237582370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237582372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,377 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d0c9bf9b018a4336b6bcb3e98d0b1be1, entries=150, sequenceid=16, filesize=11.7 K 2024-12-15T04:38:42,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 574a05e47406cea06ff474376a420947 in 662ms, sequenceid=16, compaction requested=false 2024-12-15T04:38:42,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:42,470 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=40 2024-12-15T04:38:42,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:42,471 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:38:42,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:42,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:42,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:42,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:42,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:42,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:42,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215c054404cc07e4f118f76233e5b7c02fd_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237521755/Put/seqid=0 2024-12-15T04:38:42,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741954_1130 (size=12154) 2024-12-15T04:38:42,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,544 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215c054404cc07e4f118f76233e5b7c02fd_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215c054404cc07e4f118f76233e5b7c02fd_574a05e47406cea06ff474376a420947 2024-12-15T04:38:42,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b04648c4deef44c4915a8ea9d149cf05, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:42,547 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b04648c4deef44c4915a8ea9d149cf05 is 175, key is test_row_0/A:col10/1734237521755/Put/seqid=0 2024-12-15T04:38:42,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741955_1131 (size=30955) 2024-12-15T04:38:42,558 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b04648c4deef44c4915a8ea9d149cf05 2024-12-15T04:38:42,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/ff316e61fdbd43b7969fa984e46a6148 is 50, key is test_row_0/B:col10/1734237521755/Put/seqid=0 2024-12-15T04:38:42,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741956_1132 (size=12001) 2024-12-15T04:38:42,593 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/ff316e61fdbd43b7969fa984e46a6148 2024-12-15T04:38:42,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/46f1f2c70ad344aa8c05c16671b7c390 is 50, key is test_row_0/C:col10/1734237521755/Put/seqid=0 2024-12-15T04:38:42,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741957_1133 (size=12001) 2024-12-15T04:38:42,621 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/46f1f2c70ad344aa8c05c16671b7c390 2024-12-15T04:38:42,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b04648c4deef44c4915a8ea9d149cf05 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b04648c4deef44c4915a8ea9d149cf05 2024-12-15T04:38:42,658 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b04648c4deef44c4915a8ea9d149cf05, entries=150, sequenceid=41, filesize=30.2 K 2024-12-15T04:38:42,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/ff316e61fdbd43b7969fa984e46a6148 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ff316e61fdbd43b7969fa984e46a6148 2024-12-15T04:38:42,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,672 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ff316e61fdbd43b7969fa984e46a6148, entries=150, sequenceid=41, filesize=11.7 K 2024-12-15T04:38:42,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/46f1f2c70ad344aa8c05c16671b7c390 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/46f1f2c70ad344aa8c05c16671b7c390 2024-12-15T04:38:42,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,679 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/46f1f2c70ad344aa8c05c16671b7c390, entries=150, sequenceid=41, filesize=11.7 K 2024-12-15T04:38:42,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,680 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 574a05e47406cea06ff474376a420947 in 209ms, sequenceid=41, compaction requested=false 2024-12-15T04:38:42,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:42,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:42,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-15T04:38:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-15T04:38:42,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-12-15T04:38:42,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 978 msec 2024-12-15T04:38:42,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,688 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=39, table=TestAcidGuarantees in 985 msec 2024-12-15T04:38:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,692 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,695 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,698 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,701 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) ("instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker") repeat continuously across RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=43199) between 2024-12-15T04:38:42,702 and 2024-12-15T04:38:42,777 ...]
2024-12-15T04:38:42,777 DEBUG
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,780 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,784 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,786 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,789 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,791 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,796 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,799 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,802 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,804 DEBUG 
[... interleaved StoreFileTrackerFactory DEBUG entries omitted (handlers 0-2, port 43199, 04:38:42,804 through 04:38:42,813) ...]
2024-12-15T04:38:42,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=39
2024-12-15T04:38:42,807 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 39 completed
2024-12-15T04:38:42,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-12-15T04:38:42,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees
2024-12-15T04:38:42,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41
2024-12-15T04:38:42,812 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-12-15T04:38:42,812 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=41, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-12-15T04:38:42,813 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
[... repeated identical DEBUG entries omitted: RpcServer.default.FPBQ.Fifo handlers 0-2 on port 43199, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, 04:38:42,813 through 04:38:42,827 ...]
2024-12-15T04:38:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T04:38:42,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:42,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,929 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:42,929 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:42,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:42,930 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:42,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:42,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:42,930 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:42,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121514a6c42bb1b64e44aa76f9552a456cd1_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237522928/Put/seqid=0 2024-12-15T04:38:42,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
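The flush above routes family A through the MOB path (note the flushed file under mobdir/.tmp). As a hedged illustration only, and not something stated in this log, the following Java sketch shows how a table like TestAcidGuarantees could be declared with a MOB-enabled family A; the threshold value is an assumption chosen for a test-sized setup.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Family "A" stores large values through the MOB path once they exceed the threshold;
      // the 100-byte threshold is an assumption, not taken from this log.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L)
              .build())
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"))
          .build());
    }
  }
}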
2024-12-15T04:38:42,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
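The handlers above repeatedly instantiate the default StoreFileTracker for each store they touch. For orientation only, the sketch below shows where a tracker implementation is normally selected in configuration; the property name and value are assumptions inferred from the factory class named in the log, not something the log itself states.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property name; the factory in the log falls back to
    // DefaultStoreFileTracker when this is unset or set to DEFAULT.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println(conf.get("hbase.store.file-tracker.impl"));
  }
}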
2024-12-15T04:38:42,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,965 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-15T04:38:42,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:42,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:42,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
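The flush procedure above (pid=42) is rejected because the region is already flushing, and the IOException is how the region server signals the master to retry the remote procedure. As a hedged, client-side illustration only (this is not the procedure code), an administrative flush can be wrapped in a simple retry loop; the retry budget and backoff are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

import java.io.IOException;

public class FlushWithRetrySketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      IOException last = null;
      for (int attempt = 1; attempt <= 5; attempt++) {   // retry budget is an assumption
        try {
          admin.flush(table);                            // may collide with an in-progress flush
          return;
        } catch (IOException e) {
          last = e;
          Thread.sleep(200L * attempt);                  // simple linear backoff
        }
      }
      throw last;
    }
  }
}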
2024-12-15T04:38:42,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:42,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237582970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237582973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237582973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,981 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237582976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:42,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237582976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:42,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741959_1135 (size=24358) 2024-12-15T04:38:42,985 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:42,989 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-15T04:38:42,992 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121514a6c42bb1b64e44aa76f9552a456cd1_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121514a6c42bb1b64e44aa76f9552a456cd1_574a05e47406cea06ff474376a420947 2024-12-15T04:38:43,000 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/edee14bc5c724a38bb3aaf3c52ce4528, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:43,002 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/edee14bc5c724a38bb3aaf3c52ce4528 is 175, key is test_row_0/A:col10/1734237522928/Put/seqid=0 2024-12-15T04:38:43,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741958_1134 (size=73991) 2024-12-15T04:38:43,006 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/edee14bc5c724a38bb3aaf3c52ce4528 2024-12-15T04:38:43,026 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/30f3f3056a70454d8378ee81d929692d is 50, key is test_row_0/B:col10/1734237522928/Put/seqid=0 2024-12-15T04:38:43,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741960_1136 (size=9657) 2024-12-15T04:38:43,059 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/30f3f3056a70454d8378ee81d929692d 2024-12-15T04:38:43,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/61dc7b599c2049429621ef43a4cf4f8a is 50, key is test_row_0/C:col10/1734237522928/Put/seqid=0 2024-12-15T04:38:43,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237583081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237583082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237583082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237583083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,087 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237583084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741961_1137 (size=9657) 2024-12-15T04:38:43,094 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/61dc7b599c2049429621ef43a4cf4f8a 2024-12-15T04:38:43,102 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/edee14bc5c724a38bb3aaf3c52ce4528 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/edee14bc5c724a38bb3aaf3c52ce4528 2024-12-15T04:38:43,110 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/edee14bc5c724a38bb3aaf3c52ce4528, entries=400, sequenceid=52, filesize=72.3 K 2024-12-15T04:38:43,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/30f3f3056a70454d8378ee81d929692d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/30f3f3056a70454d8378ee81d929692d 2024-12-15T04:38:43,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T04:38:43,120 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-15T04:38:43,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:43,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:43,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:43,121 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] handler.RSProcedureHandler(58): pid=42 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:43,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=42 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
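The RegionTooBusyException entries above show mutations being rejected while the memstore is over its blocking limit. The standard HBase client already retries this exception internally, so the explicit loop below is only a hedged sketch of what that handling amounts to; the row, column, value, and backoff figures are assumptions modeled on the keys visible in the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBackoffSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 10) {
            throw e;                                      // give up after a fixed budget (assumption)
          }
          Thread.sleep(Math.min(100L << attempt, 5_000L)); // exponential backoff, capped
        }
      }
    }
  }
}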
2024-12-15T04:38:43,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/30f3f3056a70454d8378ee81d929692d, entries=100, sequenceid=52, filesize=9.4 K 2024-12-15T04:38:43,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/61dc7b599c2049429621ef43a4cf4f8a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/61dc7b599c2049429621ef43a4cf4f8a 2024-12-15T04:38:43,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=42 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
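The 512 K blocking limit in the warnings above is the product of the per-region flush size and the block multiplier, which this test run has clearly dialed far below production defaults. The sketch below only names those knobs; the concrete numbers are assumptions picked so that their product matches the 512 K limit reported in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-scale values: 128 KB flush size * multiplier 4 = 512 KB blocking limit,
    // consistent with the "Over memstore limit=512.0 K" warnings in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("blocking limit (bytes): " + blockingLimit);
  }
}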
2024-12-15T04:38:43,132 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/61dc7b599c2049429621ef43a4cf4f8a, entries=100, sequenceid=52, filesize=9.4 K 2024-12-15T04:38:43,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 574a05e47406cea06ff474376a420947 in 204ms, sequenceid=52, compaction requested=true 2024-12-15T04:38:43,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:43,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:43,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:43,133 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:43,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:43,133 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:43,133 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:43,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:43,134 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:43,136 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:43,136 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:43,136 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
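After the flush completes, each store holds three files and minor compactions are queued; the "3 eligible, 16 blocking" figures correspond to the minimum-files and blocking-store-files thresholds. The sketch below is illustrative only: the property values are assumptions matching the numbers in the log (and would take effect on the server, not the client), and the final call simply shows how a compaction can also be requested explicitly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Server-side properties behind the numbers seen in the log (values assumed):
    // compaction becomes eligible at 3 files, writes block at 16 files per store.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a (minor) compaction of the whole table; file selection stays with the server.
      admin.compact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}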
2024-12-15T04:38:43,136 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/20bd25bbf5544a9a8317b04a1465cc34, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ff316e61fdbd43b7969fa984e46a6148, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/30f3f3056a70454d8378ee81d929692d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=32.9 K 2024-12-15T04:38:43,136 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 135901 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:43,137 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:43,137 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:43,137 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/cb02d24f863a4bbda083d9c8c800e42f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b04648c4deef44c4915a8ea9d149cf05, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/edee14bc5c724a38bb3aaf3c52ce4528] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=132.7 K 2024-12-15T04:38:43,137 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:43,137 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/cb02d24f863a4bbda083d9c8c800e42f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b04648c4deef44c4915a8ea9d149cf05, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/edee14bc5c724a38bb3aaf3c52ce4528] 2024-12-15T04:38:43,138 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 20bd25bbf5544a9a8317b04a1465cc34, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734237521708 2024-12-15T04:38:43,138 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb02d24f863a4bbda083d9c8c800e42f, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734237521708 2024-12-15T04:38:43,139 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ff316e61fdbd43b7969fa984e46a6148, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734237521742 2024-12-15T04:38:43,139 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b04648c4deef44c4915a8ea9d149cf05, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734237521742 2024-12-15T04:38:43,139 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 30f3f3056a70454d8378ee81d929692d, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237522897 2024-12-15T04:38:43,140 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting edee14bc5c724a38bb3aaf3c52ce4528, keycount=400, bloomtype=ROW, size=72.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237522890 2024-12-15T04:38:43,153 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:43,158 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#118 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:43,159 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/a1cc6c9931314587815b35f492aa918a is 50, key is test_row_0/B:col10/1734237522928/Put/seqid=0 2024-12-15T04:38:43,164 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121515105f423cd541cc9766675883330420_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:43,170 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121515105f423cd541cc9766675883330420_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:43,170 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121515105f423cd541cc9766675883330420_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:43,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741962_1138 (size=12104) 2024-12-15T04:38:43,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741963_1139 (size=4469) 2024-12-15T04:38:43,199 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#117 average throughput is 0.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:43,201 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/460a9ac95ada4f2ca870b6c7d2abd39c is 175, key is test_row_0/A:col10/1734237522928/Put/seqid=0 2024-12-15T04:38:43,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741964_1140 (size=31165) 2024-12-15T04:38:43,231 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/460a9ac95ada4f2ca870b6c7d2abd39c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/460a9ac95ada4f2ca870b6c7d2abd39c 2024-12-15T04:38:43,239 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into 460a9ac95ada4f2ca870b6c7d2abd39c(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:43,239 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:43,239 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237523133; duration=0sec 2024-12-15T04:38:43,240 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:43,240 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:43,240 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:43,241 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:43,241 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:43,241 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:43,242 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d0c9bf9b018a4336b6bcb3e98d0b1be1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/46f1f2c70ad344aa8c05c16671b7c390, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/61dc7b599c2049429621ef43a4cf4f8a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=32.9 K 2024-12-15T04:38:43,243 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0c9bf9b018a4336b6bcb3e98d0b1be1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1734237521708 2024-12-15T04:38:43,243 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46f1f2c70ad344aa8c05c16671b7c390, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734237521742 2024-12-15T04:38:43,244 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61dc7b599c2049429621ef43a4cf4f8a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237522897 2024-12-15T04:38:43,256 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#119 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:43,256 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/b36d2b88d6004924b35599d0c21ff1c2 is 50, key is test_row_0/C:col10/1734237522928/Put/seqid=0 2024-12-15T04:38:43,277 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=42 2024-12-15T04:38:43,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:43,278 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-15T04:38:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:43,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:43,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741965_1141 (size=12104) 2024-12-15T04:38:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:43,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:43,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412152951ce2c339d46a69a37271a269c3140_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237522973/Put/seqid=0 2024-12-15T04:38:43,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237583300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,304 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237583300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237583303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237583304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237583304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741966_1142 (size=12154) 2024-12-15T04:38:43,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,319 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412152951ce2c339d46a69a37271a269c3140_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412152951ce2c339d46a69a37271a269c3140_574a05e47406cea06ff474376a420947 2024-12-15T04:38:43,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9e4df0dfac7e413db6309f755db31bb4, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:43,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9e4df0dfac7e413db6309f755db31bb4 is 175, key is test_row_0/A:col10/1734237522973/Put/seqid=0 2024-12-15T04:38:43,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741967_1143 (size=30955) 2024-12-15T04:38:43,334 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9e4df0dfac7e413db6309f755db31bb4 2024-12-15T04:38:43,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/122dbb7f65804b3289ca6e49318f81c5 is 50, key is test_row_0/B:col10/1734237522973/Put/seqid=0 2024-12-15T04:38:43,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741968_1144 (size=12001) 2024-12-15T04:38:43,374 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/122dbb7f65804b3289ca6e49318f81c5 2024-12-15T04:38:43,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/16742f2046e04f1a859155fb1408c999 is 50, key is test_row_0/C:col10/1734237522973/Put/seqid=0 2024-12-15T04:38:43,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741969_1145 (size=12001) 2024-12-15T04:38:43,407 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/16742f2046e04f1a859155fb1408c999 2024-12-15T04:38:43,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237583406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,411 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237583406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237583409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237583412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237583411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T04:38:43,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9e4df0dfac7e413db6309f755db31bb4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9e4df0dfac7e413db6309f755db31bb4 2024-12-15T04:38:43,420 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9e4df0dfac7e413db6309f755db31bb4, entries=150, sequenceid=79, filesize=30.2 K 2024-12-15T04:38:43,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/122dbb7f65804b3289ca6e49318f81c5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/122dbb7f65804b3289ca6e49318f81c5 2024-12-15T04:38:43,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,427 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/122dbb7f65804b3289ca6e49318f81c5, entries=150, sequenceid=79, filesize=11.7 K 2024-12-15T04:38:43,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/16742f2046e04f1a859155fb1408c999 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/16742f2046e04f1a859155fb1408c999 2024-12-15T04:38:43,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,436 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/16742f2046e04f1a859155fb1408c999, entries=150, sequenceid=79, filesize=11.7 K 2024-12-15T04:38:43,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,437 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,437 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 574a05e47406cea06ff474376a420947 in 159ms, sequenceid=79, compaction requested=false 2024-12-15T04:38:43,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:43,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:43,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=42}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=42 2024-12-15T04:38:43,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=42 2024-12-15T04:38:43,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,440 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=41 2024-12-15T04:38:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,441 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 626 msec 2024-12-15T04:38:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,442 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,442 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=41, table=TestAcidGuarantees in 632 msec 2024-12-15T04:38:43,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,445 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,510 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,512 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,514 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,516 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,519 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,522 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,525 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,529 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,533 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,537 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,541 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,543 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,546 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-15T04:38:43,589 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/a1cc6c9931314587815b35f492aa918a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a1cc6c9931314587815b35f492aa918a
2024-12-15T04:38:43,597 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into a1cc6c9931314587815b35f492aa918a(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-12-15T04:38:43,597 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947:
2024-12-15T04:38:43,597 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237523133; duration=0sec
2024-12-15T04:38:43,598 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-12-15T04:38:43,598 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B
2024-12-15T04:38:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:43,624 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:38:43,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:43,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:43,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:43,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:43,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:43,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,624 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:43,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215675a803585eb4be29006539511aad549_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237523622/Put/seqid=0 2024-12-15T04:38:43,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741970_1146 (size=12154) 2024-12-15T04:38:43,663 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:43,672 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215675a803585eb4be29006539511aad549_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215675a803585eb4be29006539511aad549_574a05e47406cea06ff474376a420947 2024-12-15T04:38:43,674 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/da9f891bf91344df8868cd32c6e061ef, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:43,674 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/da9f891bf91344df8868cd32c6e061ef is 175, key is test_row_0/A:col10/1734237523622/Put/seqid=0 2024-12-15T04:38:43,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741971_1147 (size=30951) 2024-12-15T04:38:43,691 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/b36d2b88d6004924b35599d0c21ff1c2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/b36d2b88d6004924b35599d0c21ff1c2 2024-12-15T04:38:43,708 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into b36d2b88d6004924b35599d0c21ff1c2(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:43,708 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:43,708 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237523133; duration=0sec 2024-12-15T04:38:43,709 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:43,709 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:43,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237583658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237583668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,715 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237583710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237583710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237583710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237583811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237583816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237583817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237583818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:43,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237583819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:43,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-15T04:38:43,915 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 41 completed 2024-12-15T04:38:43,917 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:43,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-12-15T04:38:43,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-15T04:38:43,919 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:43,920 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:43,920 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:44,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237584014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-15T04:38:44,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237584020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237584021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237584021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237584022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,074 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-15T04:38:44,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:44,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:44,074 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:44,074 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:44,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:44,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:44,082 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=92, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/da9f891bf91344df8868cd32c6e061ef 2024-12-15T04:38:44,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/de75e2ccbea34928b14502e0104ced72 is 50, key is test_row_0/B:col10/1734237523622/Put/seqid=0 2024-12-15T04:38:44,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741972_1148 (size=9657) 2024-12-15T04:38:44,121 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/de75e2ccbea34928b14502e0104ced72 2024-12-15T04:38:44,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/54d3e8e98f934fa6946627a4f23d7b1a is 50, key is test_row_0/C:col10/1734237523622/Put/seqid=0 2024-12-15T04:38:44,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741973_1149 (size=9657) 2024-12-15T04:38:44,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/54d3e8e98f934fa6946627a4f23d7b1a 2024-12-15T04:38:44,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/da9f891bf91344df8868cd32c6e061ef as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/da9f891bf91344df8868cd32c6e061ef 2024-12-15T04:38:44,151 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/da9f891bf91344df8868cd32c6e061ef, entries=150, sequenceid=92, filesize=30.2 K 2024-12-15T04:38:44,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/de75e2ccbea34928b14502e0104ced72 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de75e2ccbea34928b14502e0104ced72 2024-12-15T04:38:44,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de75e2ccbea34928b14502e0104ced72, entries=100, sequenceid=92, filesize=9.4 K 2024-12-15T04:38:44,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/54d3e8e98f934fa6946627a4f23d7b1a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/54d3e8e98f934fa6946627a4f23d7b1a 2024-12-15T04:38:44,170 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/54d3e8e98f934fa6946627a4f23d7b1a, entries=100, sequenceid=92, filesize=9.4 K 2024-12-15T04:38:44,171 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 574a05e47406cea06ff474376a420947 in 547ms, sequenceid=92, compaction requested=true 2024-12-15T04:38:44,171 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:44,171 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:44,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:44,172 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:44,172 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93071 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:44,172 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:44,172 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:44,173 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/460a9ac95ada4f2ca870b6c7d2abd39c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9e4df0dfac7e413db6309f755db31bb4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/da9f891bf91344df8868cd32c6e061ef] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=90.9 K 2024-12-15T04:38:44,173 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:44,173 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/460a9ac95ada4f2ca870b6c7d2abd39c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9e4df0dfac7e413db6309f755db31bb4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/da9f891bf91344df8868cd32c6e061ef] 2024-12-15T04:38:44,173 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:44,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:44,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:44,173 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 460a9ac95ada4f2ca870b6c7d2abd39c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237521742 2024-12-15T04:38:44,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:44,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:44,174 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e4df0dfac7e413db6309f755db31bb4, keycount=150, bloomtype=ROW, size=30.2 K, 
encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734237522963 2024-12-15T04:38:44,174 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting da9f891bf91344df8868cd32c6e061ef, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237523301 2024-12-15T04:38:44,176 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:44,176 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:44,176 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:44,176 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a1cc6c9931314587815b35f492aa918a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/122dbb7f65804b3289ca6e49318f81c5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de75e2ccbea34928b14502e0104ced72] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=33.0 K 2024-12-15T04:38:44,177 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a1cc6c9931314587815b35f492aa918a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237521742 2024-12-15T04:38:44,179 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 122dbb7f65804b3289ca6e49318f81c5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734237522963 2024-12-15T04:38:44,180 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting de75e2ccbea34928b14502e0104ced72, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237523301 2024-12-15T04:38:44,188 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:44,205 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:44,206 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/b93f58627cd94f6cb5c812b6bef90fc9 is 50, key is test_row_0/B:col10/1734237523622/Put/seqid=0 2024-12-15T04:38:44,212 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412150681fcb41bb64368b3b6e6093df08edb_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:44,214 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412150681fcb41bb64368b3b6e6093df08edb_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:44,216 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412150681fcb41bb64368b3b6e6093df08edb_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:44,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741974_1150 (size=12207) 2024-12-15T04:38:44,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-15T04:38:44,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741975_1151 (size=4469) 2024-12-15T04:38:44,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-12-15T04:38:44,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:44,232 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:38:44,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:44,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:44,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:44,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:44,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:44,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:44,233 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#126 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:44,234 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1b9c4a1e8e494e4ba0b8eb87ac493878 is 175, key is test_row_0/A:col10/1734237523622/Put/seqid=0 2024-12-15T04:38:44,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741976_1152 (size=31268) 2024-12-15T04:38:44,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412155fdcc2f891d44e55bf413b0c843469af_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237523660/Put/seqid=0 2024-12-15T04:38:44,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741977_1153 (size=12154) 2024-12-15T04:38:44,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:44,320 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:44,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237584331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237584331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237584332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237584334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237584334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237584436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237584436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237584436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,442 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237584440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237584440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-15T04:38:44,626 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/b93f58627cd94f6cb5c812b6bef90fc9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/b93f58627cd94f6cb5c812b6bef90fc9 2024-12-15T04:38:44,635 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into b93f58627cd94f6cb5c812b6bef90fc9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:44,636 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:44,636 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237524172; duration=0sec 2024-12-15T04:38:44,636 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:44,636 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:44,636 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:44,637 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33762 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:44,638 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:44,638 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:44,638 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/b36d2b88d6004924b35599d0c21ff1c2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/16742f2046e04f1a859155fb1408c999, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/54d3e8e98f934fa6946627a4f23d7b1a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=33.0 K 2024-12-15T04:38:44,639 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting b36d2b88d6004924b35599d0c21ff1c2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237521742 2024-12-15T04:38:44,639 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237584639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,640 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 16742f2046e04f1a859155fb1408c999, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734237522963 2024-12-15T04:38:44,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237584639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,642 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 54d3e8e98f934fa6946627a4f23d7b1a, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237523301 2024-12-15T04:38:44,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237584641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237584644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237584646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,658 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1b9c4a1e8e494e4ba0b8eb87ac493878 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1b9c4a1e8e494e4ba0b8eb87ac493878 2024-12-15T04:38:44,673 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into 1b9c4a1e8e494e4ba0b8eb87ac493878(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:44,673 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:44,673 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237524171; duration=0sec 2024-12-15T04:38:44,673 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:44,674 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:44,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:44,676 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#129 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:44,677 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/62bb6883a73841b898666885138865eb is 50, key is test_row_0/C:col10/1734237523622/Put/seqid=0 2024-12-15T04:38:44,685 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412155fdcc2f891d44e55bf413b0c843469af_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155fdcc2f891d44e55bf413b0c843469af_574a05e47406cea06ff474376a420947 2024-12-15T04:38:44,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/6651808ca748443faadfdd099c145eb5, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:44,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/6651808ca748443faadfdd099c145eb5 is 175, key is test_row_0/A:col10/1734237523660/Put/seqid=0 2024-12-15T04:38:44,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741978_1154 (size=12207) 2024-12-15T04:38:44,707 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/62bb6883a73841b898666885138865eb as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/62bb6883a73841b898666885138865eb 2024-12-15T04:38:44,715 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into 62bb6883a73841b898666885138865eb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
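
The "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and "Exploring compaction algorithm has selected 3 files" lines above are SortedCompactionPolicy/ExploringCompactionPolicy re-evaluating each store after the flush; the "16 blocking" figure corresponds to the blocking store-file count. A hedged sketch of the configuration keys that drive those numbers (the values shown are the usual defaults and are illustrative, not necessarily what this test configured):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum and maximum number of store files a minor compaction may select.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Updates to a store are delayed once it holds this many files; this is the
    // source of the "16 blocking" figure in the selection log lines above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
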
2024-12-15T04:38:44,716 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:44,716 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237524173; duration=0sec 2024-12-15T04:38:44,717 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:44,717 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:44,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741979_1155 (size=30955) 2024-12-15T04:38:44,722 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/6651808ca748443faadfdd099c145eb5 2024-12-15T04:38:44,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/4823c7107c974075b882703ee2722207 is 50, key is test_row_0/B:col10/1734237523660/Put/seqid=0 2024-12-15T04:38:44,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741980_1156 (size=12001) 2024-12-15T04:38:44,774 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/4823c7107c974075b882703ee2722207 2024-12-15T04:38:44,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/2052d82ea40f45f1b546b62a16d50c24 is 50, key is test_row_0/C:col10/1734237523660/Put/seqid=0 2024-12-15T04:38:44,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741981_1157 (size=12001) 2024-12-15T04:38:44,802 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/2052d82ea40f45f1b546b62a16d50c24 
2024-12-15T04:38:44,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/6651808ca748443faadfdd099c145eb5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6651808ca748443faadfdd099c145eb5 2024-12-15T04:38:44,819 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6651808ca748443faadfdd099c145eb5, entries=150, sequenceid=118, filesize=30.2 K 2024-12-15T04:38:44,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/4823c7107c974075b882703ee2722207 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/4823c7107c974075b882703ee2722207 2024-12-15T04:38:44,828 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/4823c7107c974075b882703ee2722207, entries=150, sequenceid=118, filesize=11.7 K 2024-12-15T04:38:44,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/2052d82ea40f45f1b546b62a16d50c24 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2052d82ea40f45f1b546b62a16d50c24 2024-12-15T04:38:44,836 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2052d82ea40f45f1b546b62a16d50c24, entries=150, sequenceid=118, filesize=11.7 K 2024-12-15T04:38:44,839 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 574a05e47406cea06ff474376a420947 in 606ms, sequenceid=118, compaction requested=false 2024-12-15T04:38:44,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:44,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on 
TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:44,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-12-15T04:38:44,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-12-15T04:38:44,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-12-15T04:38:44,844 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 921 msec 2024-12-15T04:38:44,850 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 928 msec 2024-12-15T04:38:44,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:44,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-15T04:38:44,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:44,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:44,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:44,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:44,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:44,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:44,969 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412151cbc271c46a64f3c849adf475b6c3271_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237524943/Put/seqid=0 2024-12-15T04:38:44,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741982_1158 (size=12204) 2024-12-15T04:38:44,977 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:44,983 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412151cbc271c46a64f3c849adf475b6c3271_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412151cbc271c46a64f3c849adf475b6c3271_574a05e47406cea06ff474376a420947 
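
The HMobStore/DefaultMobStoreFlusher lines above show family A being flushed through the MOB path: the mob cell file is written under mobdir/.tmp and then renamed into the mob data directory before the regular store file is committed. A minimal, hypothetical sketch of how a table with a MOB-enabled family might be declared (table and family names mirror the test; the 100-byte threshold is illustrative only):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Cells in family A larger than the threshold are written as MOB files and
      // flushed through DefaultMobStoreFlusher, as seen in the log above.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100L)
          .build());
      admin.createTable(table.build());
    }
  }
}
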
2024-12-15T04:38:44,985 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/60adf7ece7544820a852632f0784a0ff, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:44,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/60adf7ece7544820a852632f0784a0ff is 175, key is test_row_0/A:col10/1734237524943/Put/seqid=0 2024-12-15T04:38:44,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237584985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,991 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741983_1159 (size=31005) 2024-12-15T04:38:44,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237584988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=134, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/60adf7ece7544820a852632f0784a0ff 2024-12-15T04:38:44,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237584990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237584990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:44,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:44,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237584991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1c99e43885964cf99f6bee3ced8af7bb is 50, key is test_row_0/B:col10/1734237524943/Put/seqid=0 2024-12-15T04:38:45,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741984_1160 (size=12051) 2024-12-15T04:38:45,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1c99e43885964cf99f6bee3ced8af7bb 2024-12-15T04:38:45,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-12-15T04:38:45,023 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-12-15T04:38:45,023 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/d3e2d92de699401a90d18c44b8332db8 is 50, key is test_row_0/C:col10/1734237524943/Put/seqid=0 2024-12-15T04:38:45,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:45,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-12-15T04:38:45,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 
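
The master-side sequence above (the jenkins client requesting "flush TestAcidGuarantees", the master storing FlushTableProcedure pid=45, and the client polling "Checking to see if procedure is done") is what an administrative table flush looks like from the server. A hedged client-side sketch of the call that starts such a procedure (connection setup is illustrative; the table name matches the test):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; per the log, this runs
      // as a FlushTableProcedure with one FlushRegionProcedure subprocedure per
      // region, and the call returns once the procedure completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
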
2024-12-15T04:38:45,030 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:45,031 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:45,032 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:45,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741985_1161 (size=12051) 2024-12-15T04:38:45,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/d3e2d92de699401a90d18c44b8332db8 2024-12-15T04:38:45,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/60adf7ece7544820a852632f0784a0ff as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/60adf7ece7544820a852632f0784a0ff 2024-12-15T04:38:45,065 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/60adf7ece7544820a852632f0784a0ff, entries=150, sequenceid=134, filesize=30.3 K 2024-12-15T04:38:45,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1c99e43885964cf99f6bee3ced8af7bb as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1c99e43885964cf99f6bee3ced8af7bb 2024-12-15T04:38:45,075 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1c99e43885964cf99f6bee3ced8af7bb, entries=150, sequenceid=134, filesize=11.8 K 2024-12-15T04:38:45,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/d3e2d92de699401a90d18c44b8332db8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d3e2d92de699401a90d18c44b8332db8 2024-12-15T04:38:45,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d3e2d92de699401a90d18c44b8332db8, entries=150, sequenceid=134, filesize=11.8 K 2024-12-15T04:38:45,085 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 574a05e47406cea06ff474376a420947 in 141ms, sequenceid=134, compaction requested=true 2024-12-15T04:38:45,085 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:45,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:45,086 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:45,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:45,086 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:45,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:45,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:45,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:45,086 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:45,087 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93228 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:45,087 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:45,088 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:45,088 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:45,088 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:45,088 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,088 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1b9c4a1e8e494e4ba0b8eb87ac493878, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6651808ca748443faadfdd099c145eb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/60adf7ece7544820a852632f0784a0ff] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=91.0 K 2024-12-15T04:38:45,088 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/b93f58627cd94f6cb5c812b6bef90fc9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/4823c7107c974075b882703ee2722207, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1c99e43885964cf99f6bee3ced8af7bb] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=35.4 K 2024-12-15T04:38:45,088 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,088 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1b9c4a1e8e494e4ba0b8eb87ac493878, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6651808ca748443faadfdd099c145eb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/60adf7ece7544820a852632f0784a0ff] 2024-12-15T04:38:45,089 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting b93f58627cd94f6cb5c812b6bef90fc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237522969 2024-12-15T04:38:45,089 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b9c4a1e8e494e4ba0b8eb87ac493878, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237522969 2024-12-15T04:38:45,089 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 4823c7107c974075b882703ee2722207, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237523660 2024-12-15T04:38:45,090 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6651808ca748443faadfdd099c145eb5, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237523660 2024-12-15T04:38:45,090 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60adf7ece7544820a852632f0784a0ff, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734237524327 2024-12-15T04:38:45,090 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c99e43885964cf99f6bee3ced8af7bb, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734237524327 2024-12-15T04:38:45,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:45,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:38:45,103 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:45,103 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#135 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:45,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:45,103 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/ab653a7a5b9e4b59a04db1723c7cc2a1 is 50, key is test_row_0/B:col10/1734237524943/Put/seqid=0 2024-12-15T04:38:45,103 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:45,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:45,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:45,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:45,104 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:45,106 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121572efecd8b0794d758574b59ebc839e49_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:45,108 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121572efecd8b0794d758574b59ebc839e49_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:45,109 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121572efecd8b0794d758574b59ebc839e49_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:45,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237585111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237585111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237585114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741986_1162 (size=12359) 2024-12-15T04:38:45,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237585115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237585116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-15T04:38:45,137 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121588a0a3bc3d3a4b9ca712516add04b871_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237525098/Put/seqid=0 2024-12-15T04:38:45,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741987_1163 (size=4469) 2024-12-15T04:38:45,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741988_1164 (size=12304) 2024-12-15T04:38:45,184 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237585217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237585218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,221 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237585218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,224 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237585222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237585223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-15T04:38:45,337 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:45,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:45,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,338 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237585421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237585422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,427 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237585424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237585427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237585427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,491 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:45,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:45,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,492 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,529 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/ab653a7a5b9e4b59a04db1723c7cc2a1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ab653a7a5b9e4b59a04db1723c7cc2a1 2024-12-15T04:38:45,536 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into ab653a7a5b9e4b59a04db1723c7cc2a1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:45,536 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:45,536 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237525086; duration=0sec 2024-12-15T04:38:45,536 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:45,536 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:45,536 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:45,539 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:45,539 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:45,539 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,539 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/62bb6883a73841b898666885138865eb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2052d82ea40f45f1b546b62a16d50c24, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d3e2d92de699401a90d18c44b8332db8] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=35.4 K 2024-12-15T04:38:45,541 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 62bb6883a73841b898666885138865eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237522969 2024-12-15T04:38:45,542 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 2052d82ea40f45f1b546b62a16d50c24, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1734237523660 2024-12-15T04:38:45,543 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d3e2d92de699401a90d18c44b8332db8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734237524327 2024-12-15T04:38:45,551 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
574a05e47406cea06ff474376a420947#A#compaction#136 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:45,552 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/477d70a868d84223884c667ff56ec0ca is 175, key is test_row_0/A:col10/1734237524943/Put/seqid=0 2024-12-15T04:38:45,552 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#138 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:45,553 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/2feeb7f3a1854f239fc3b2b7e69886c5 is 50, key is test_row_0/C:col10/1734237524943/Put/seqid=0 2024-12-15T04:38:45,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741989_1165 (size=31313) 2024-12-15T04:38:45,570 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:45,581 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121588a0a3bc3d3a4b9ca712516add04b871_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121588a0a3bc3d3a4b9ca712516add04b871_574a05e47406cea06ff474376a420947 2024-12-15T04:38:45,585 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/477d70a868d84223884c667ff56ec0ca as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/477d70a868d84223884c667ff56ec0ca 2024-12-15T04:38:45,585 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1dd5f66f6e44404e9d214729816e3c22, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:45,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1dd5f66f6e44404e9d214729816e3c22 is 175, key is test_row_0/A:col10/1734237525098/Put/seqid=0 2024-12-15T04:38:45,591 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741990_1166 (size=12359) 2024-12-15T04:38:45,601 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into 477d70a868d84223884c667ff56ec0ca(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:45,601 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:45,601 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237525085; duration=0sec 2024-12-15T04:38:45,602 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:45,602 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:45,604 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/2feeb7f3a1854f239fc3b2b7e69886c5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2feeb7f3a1854f239fc3b2b7e69886c5 2024-12-15T04:38:45,613 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into 2feeb7f3a1854f239fc3b2b7e69886c5(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:45,613 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:45,613 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237525086; duration=0sec 2024-12-15T04:38:45,613 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:45,613 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:45,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741991_1167 (size=31105) 2024-12-15T04:38:45,618 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=160, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1dd5f66f6e44404e9d214729816e3c22 2024-12-15T04:38:45,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/de483aa5c05a44ae93daffb245b6b106 is 50, key is test_row_0/B:col10/1734237525098/Put/seqid=0 2024-12-15T04:38:45,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-15T04:38:45,644 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:45,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:45,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:45,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741992_1168 (size=12151) 2024-12-15T04:38:45,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,647 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/de483aa5c05a44ae93daffb245b6b106 2024-12-15T04:38:45,660 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/6f3d2ff4bb5c46c6b52536b5fb7df68e is 50, key is test_row_0/C:col10/1734237525098/Put/seqid=0 2024-12-15T04:38:45,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741993_1169 (size=12151) 2024-12-15T04:38:45,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=160 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/6f3d2ff4bb5c46c6b52536b5fb7df68e 2024-12-15T04:38:45,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1dd5f66f6e44404e9d214729816e3c22 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1dd5f66f6e44404e9d214729816e3c22 2024-12-15T04:38:45,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1dd5f66f6e44404e9d214729816e3c22, entries=150, sequenceid=160, filesize=30.4 K 2024-12-15T04:38:45,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/de483aa5c05a44ae93daffb245b6b106 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de483aa5c05a44ae93daffb245b6b106 2024-12-15T04:38:45,699 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de483aa5c05a44ae93daffb245b6b106, entries=150, sequenceid=160, filesize=11.9 K 2024-12-15T04:38:45,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/6f3d2ff4bb5c46c6b52536b5fb7df68e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/6f3d2ff4bb5c46c6b52536b5fb7df68e 2024-12-15T04:38:45,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/6f3d2ff4bb5c46c6b52536b5fb7df68e, entries=150, sequenceid=160, filesize=11.9 K 2024-12-15T04:38:45,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 574a05e47406cea06ff474376a420947 in 609ms, sequenceid=160, compaction requested=false 2024-12-15T04:38:45,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:45,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:45,728 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:38:45,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:45,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:45,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:45,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:45,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:45,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:45,750 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e86e2ca3820c460fb99363b26dae8779_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237525113/Put/seqid=0 2024-12-15T04:38:45,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741994_1170 (size=12304) 2024-12-15T04:38:45,799 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,799 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:45,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:45,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,800 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237585796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,803 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237585799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237585801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,804 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237585802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237585802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237585903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237585904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237585905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237585906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:45,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237585906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,951 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:45,951 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:45,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:45,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:45,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:46,105 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:46,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:46,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:46,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:46,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:46,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:46,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:46,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237586108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,109 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237586109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237586109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237586109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237586110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-15T04:38:46,156 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:46,163 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e86e2ca3820c460fb99363b26dae8779_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e86e2ca3820c460fb99363b26dae8779_574a05e47406cea06ff474376a420947 2024-12-15T04:38:46,164 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/c045702c16c746ceae8cd215077d595d, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:46,165 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/c045702c16c746ceae8cd215077d595d is 175, key is test_row_0/A:col10/1734237525113/Put/seqid=0 2024-12-15T04:38:46,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to 
blk_1073741995_1171 (size=31105) 2024-12-15T04:38:46,176 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=174, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/c045702c16c746ceae8cd215077d595d 2024-12-15T04:38:46,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/bfa524c333134ebfb511320b1f8ee391 is 50, key is test_row_0/B:col10/1734237525113/Put/seqid=0 2024-12-15T04:38:46,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741996_1172 (size=12151) 2024-12-15T04:38:46,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/bfa524c333134ebfb511320b1f8ee391 2024-12-15T04:38:46,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/7d7dcd96436d4a50b8186f639df9ba8e is 50, key is test_row_0/C:col10/1734237525113/Put/seqid=0 2024-12-15T04:38:46,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741997_1173 (size=12151) 2024-12-15T04:38:46,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/7d7dcd96436d4a50b8186f639df9ba8e 2024-12-15T04:38:46,225 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/c045702c16c746ceae8cd215077d595d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/c045702c16c746ceae8cd215077d595d 2024-12-15T04:38:46,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/c045702c16c746ceae8cd215077d595d, entries=150, sequenceid=174, filesize=30.4 K 2024-12-15T04:38:46,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/bfa524c333134ebfb511320b1f8ee391 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bfa524c333134ebfb511320b1f8ee391 2024-12-15T04:38:46,238 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bfa524c333134ebfb511320b1f8ee391, entries=150, sequenceid=174, filesize=11.9 K 2024-12-15T04:38:46,240 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/7d7dcd96436d4a50b8186f639df9ba8e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7d7dcd96436d4a50b8186f639df9ba8e 2024-12-15T04:38:46,247 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7d7dcd96436d4a50b8186f639df9ba8e, entries=150, sequenceid=174, filesize=11.9 K 2024-12-15T04:38:46,248 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 574a05e47406cea06ff474376a420947 in 520ms, sequenceid=174, compaction requested=true 2024-12-15T04:38:46,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:46,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:46,248 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:46,248 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:46,249 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:46,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:46,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:46,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:46,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:46,250 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93523 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:46,250 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor 
compaction (all files) 2024-12-15T04:38:46,250 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:46,250 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/477d70a868d84223884c667ff56ec0ca, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1dd5f66f6e44404e9d214729816e3c22, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/c045702c16c746ceae8cd215077d595d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=91.3 K 2024-12-15T04:38:46,250 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:46,250 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/477d70a868d84223884c667ff56ec0ca, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1dd5f66f6e44404e9d214729816e3c22, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/c045702c16c746ceae8cd215077d595d] 2024-12-15T04:38:46,251 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:46,251 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:46,251 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:46,251 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ab653a7a5b9e4b59a04db1723c7cc2a1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de483aa5c05a44ae93daffb245b6b106, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bfa524c333134ebfb511320b1f8ee391] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=35.8 K 2024-12-15T04:38:46,252 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 477d70a868d84223884c667ff56ec0ca, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734237524327 2024-12-15T04:38:46,252 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ab653a7a5b9e4b59a04db1723c7cc2a1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734237524327 2024-12-15T04:38:46,253 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting de483aa5c05a44ae93daffb245b6b106, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734237525097 2024-12-15T04:38:46,253 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1dd5f66f6e44404e9d214729816e3c22, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734237525097 2024-12-15T04:38:46,254 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting bfa524c333134ebfb511320b1f8ee391, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237525110 2024-12-15T04:38:46,254 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting c045702c16c746ceae8cd215077d595d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237525110 2024-12-15T04:38:46,258 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,258 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-12-15T04:38:46,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:46,259 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-15T04:38:46,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:46,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:46,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:46,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:46,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:46,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:46,263 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:46,263 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:46,264 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/064e6a4a17844d84b7eec32744b7c10d is 50, key is test_row_0/B:col10/1734237525113/Put/seqid=0 2024-12-15T04:38:46,282 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412150cb2a9633615435ab40abd42c5bf743f_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:46,285 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412150cb2a9633615435ab40abd42c5bf743f_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:46,285 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412150cb2a9633615435ab40abd42c5bf743f_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:46,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215a8168e5eb5274a69a43f7e40f05c48db_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237525800/Put/seqid=0 2024-12-15T04:38:46,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741999_1175 (size=4469) 2024-12-15T04:38:46,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741998_1174 (size=12561) 2024-12-15T04:38:46,311 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/064e6a4a17844d84b7eec32744b7c10d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/064e6a4a17844d84b7eec32744b7c10d 2024-12-15T04:38:46,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742000_1176 (size=12304) 2024-12-15T04:38:46,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:46,321 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into 
064e6a4a17844d84b7eec32744b7c10d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:46,321 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:46,322 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237526248; duration=0sec 2024-12-15T04:38:46,322 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:46,322 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:46,322 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:46,324 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215a8168e5eb5274a69a43f7e40f05c48db_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a8168e5eb5274a69a43f7e40f05c48db_574a05e47406cea06ff474376a420947 2024-12-15T04:38:46,326 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:46,326 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:46,326 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:46,326 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2feeb7f3a1854f239fc3b2b7e69886c5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/6f3d2ff4bb5c46c6b52536b5fb7df68e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7d7dcd96436d4a50b8186f639df9ba8e] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=35.8 K 2024-12-15T04:38:46,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4e646df080f24e3c94429dfde25dc35c, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:46,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4e646df080f24e3c94429dfde25dc35c is 175, key is test_row_0/A:col10/1734237525800/Put/seqid=0 2024-12-15T04:38:46,328 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 2feeb7f3a1854f239fc3b2b7e69886c5, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1734237524327 2024-12-15T04:38:46,330 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f3d2ff4bb5c46c6b52536b5fb7df68e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=160, earliestPutTs=1734237525097 2024-12-15T04:38:46,330 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7d7dcd96436d4a50b8186f639df9ba8e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237525110 2024-12-15T04:38:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742001_1177 (size=31105) 2024-12-15T04:38:46,345 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#147 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:46,345 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/aa62b59d58b344e487a8c31aa383c004 is 50, key is test_row_0/C:col10/1734237525113/Put/seqid=0 2024-12-15T04:38:46,346 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=200, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4e646df080f24e3c94429dfde25dc35c 2024-12-15T04:38:46,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742002_1178 (size=12561) 2024-12-15T04:38:46,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/791e61b9fa844479997859aea57fc02c is 50, key is test_row_0/B:col10/1734237525800/Put/seqid=0 2024-12-15T04:38:46,365 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/aa62b59d58b344e487a8c31aa383c004 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/aa62b59d58b344e487a8c31aa383c004 2024-12-15T04:38:46,371 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into aa62b59d58b344e487a8c31aa383c004(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:46,371 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:46,371 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237526249; duration=0sec 2024-12-15T04:38:46,371 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:46,371 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:46,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742003_1179 (size=12151) 2024-12-15T04:38:46,374 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/791e61b9fa844479997859aea57fc02c 2024-12-15T04:38:46,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/3e50d4bbf5724fb38cbcd3eadf1947dc is 50, key is test_row_0/C:col10/1734237525800/Put/seqid=0 2024-12-15T04:38:46,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742004_1180 (size=12151) 2024-12-15T04:38:46,409 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/3e50d4bbf5724fb38cbcd3eadf1947dc 2024-12-15T04:38:46,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:46,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
as already flushing 2024-12-15T04:38:46,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4e646df080f24e3c94429dfde25dc35c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4e646df080f24e3c94429dfde25dc35c 2024-12-15T04:38:46,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237586419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,425 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4e646df080f24e3c94429dfde25dc35c, entries=150, sequenceid=200, filesize=30.4 K 2024-12-15T04:38:46,425 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237586422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/791e61b9fa844479997859aea57fc02c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/791e61b9fa844479997859aea57fc02c 2024-12-15T04:38:46,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237586425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237586425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237586426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,434 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/791e61b9fa844479997859aea57fc02c, entries=150, sequenceid=200, filesize=11.9 K 2024-12-15T04:38:46,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/3e50d4bbf5724fb38cbcd3eadf1947dc as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3e50d4bbf5724fb38cbcd3eadf1947dc 2024-12-15T04:38:46,442 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3e50d4bbf5724fb38cbcd3eadf1947dc, entries=150, sequenceid=200, filesize=11.9 K 2024-12-15T04:38:46,444 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=60.38 KB/61830 for 574a05e47406cea06ff474376a420947 in 185ms, sequenceid=200, compaction requested=false 2024-12-15T04:38:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:46,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-12-15T04:38:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-12-15T04:38:46,448 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-12-15T04:38:46,448 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4140 sec 2024-12-15T04:38:46,452 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 1.4230 sec 2024-12-15T04:38:46,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:46,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-15T04:38:46,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:46,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:46,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:46,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:46,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:46,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:46,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121500b3455322c64f9e9c82b9e5ce0b0b02_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237526525/Put/seqid=0 2024-12-15T04:38:46,544 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237586543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237586542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237586544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237586544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237586544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742005_1181 (size=14794) 2024-12-15T04:38:46,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237586646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237586646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237586647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237586646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,648 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237586647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,700 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#145 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:46,700 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/609e0584076e47d6ae6b02e084ac6411 is 175, key is test_row_0/A:col10/1734237525113/Put/seqid=0 2024-12-15T04:38:46,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742006_1182 (size=31515) 2024-12-15T04:38:46,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237586848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237586848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237586848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237586848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:46,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237586850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:46,959 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:46,963 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121500b3455322c64f9e9c82b9e5ce0b0b02_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121500b3455322c64f9e9c82b9e5ce0b0b02_574a05e47406cea06ff474376a420947 2024-12-15T04:38:46,964 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9c171efca7fa402888c51683be27ebaf, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:46,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9c171efca7fa402888c51683be27ebaf is 175, key is test_row_0/A:col10/1734237526525/Put/seqid=0 2024-12-15T04:38:46,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742007_1183 (size=39749) 2024-12-15T04:38:47,113 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/609e0584076e47d6ae6b02e084ac6411 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/609e0584076e47d6ae6b02e084ac6411 2024-12-15T04:38:47,118 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into 609e0584076e47d6ae6b02e084ac6411(size=30.8 K), total size for store is 61.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:47,118 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:47,118 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237526248; duration=0sec 2024-12-15T04:38:47,118 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:47,118 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:47,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-12-15T04:38:47,134 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-12-15T04:38:47,135 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:47,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-12-15T04:38:47,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-15T04:38:47,136 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:47,137 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:47,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:47,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237587153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237587153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237587154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237587154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237587154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-15T04:38:47,288 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-15T04:38:47,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:47,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:47,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,386 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9c171efca7fa402888c51683be27ebaf 2024-12-15T04:38:47,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/47549256eda94b7691b3cabb10fc83fe is 50, key is test_row_0/B:col10/1734237526525/Put/seqid=0 2024-12-15T04:38:47,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742008_1184 (size=12151) 2024-12-15T04:38:47,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-15T04:38:47,440 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-15T04:38:47,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:47,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,599 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-15T04:38:47,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:47,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237587657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,658 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237587657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237587658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,660 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237587659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,661 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:47,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237587661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-15T04:38:47,752 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,753 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-15T04:38:47,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:47,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,753 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:47,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:47,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/47549256eda94b7691b3cabb10fc83fe 2024-12-15T04:38:47,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1fdf7f1ec255427bad313cfb5f566d47 is 50, key is test_row_0/C:col10/1734237526525/Put/seqid=0 2024-12-15T04:38:47,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742009_1185 (size=12151) 2024-12-15T04:38:47,854 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1fdf7f1ec255427bad313cfb5f566d47 2024-12-15T04:38:47,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/9c171efca7fa402888c51683be27ebaf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9c171efca7fa402888c51683be27ebaf 2024-12-15T04:38:47,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9c171efca7fa402888c51683be27ebaf, entries=200, sequenceid=215, filesize=38.8 K 2024-12-15T04:38:47,866 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/47549256eda94b7691b3cabb10fc83fe as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/47549256eda94b7691b3cabb10fc83fe 2024-12-15T04:38:47,871 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/47549256eda94b7691b3cabb10fc83fe, entries=150, sequenceid=215, filesize=11.9 K 2024-12-15T04:38:47,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1fdf7f1ec255427bad313cfb5f566d47 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1fdf7f1ec255427bad313cfb5f566d47 2024-12-15T04:38:47,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1fdf7f1ec255427bad313cfb5f566d47, entries=150, sequenceid=215, filesize=11.9 K 2024-12-15T04:38:47,879 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 574a05e47406cea06ff474376a420947 in 1353ms, sequenceid=215, compaction requested=true 2024-12-15T04:38:47,879 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:47,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:47,880 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:47,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:47,880 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:47,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:47,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:47,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:47,880 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:47,881 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102369 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:47,881 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:47,881 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:47,881 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/609e0584076e47d6ae6b02e084ac6411, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4e646df080f24e3c94429dfde25dc35c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9c171efca7fa402888c51683be27ebaf] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=100.0 K 2024-12-15T04:38:47,881 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:47,882 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/609e0584076e47d6ae6b02e084ac6411, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4e646df080f24e3c94429dfde25dc35c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9c171efca7fa402888c51683be27ebaf] 2024-12-15T04:38:47,882 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:47,882 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:47,882 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 609e0584076e47d6ae6b02e084ac6411, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237525110 2024-12-15T04:38:47,882 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:47,882 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/064e6a4a17844d84b7eec32744b7c10d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/791e61b9fa844479997859aea57fc02c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/47549256eda94b7691b3cabb10fc83fe] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.0 K 2024-12-15T04:38:47,883 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 064e6a4a17844d84b7eec32744b7c10d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237525110 2024-12-15T04:38:47,883 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e646df080f24e3c94429dfde25dc35c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734237525799 2024-12-15T04:38:47,883 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 791e61b9fa844479997859aea57fc02c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734237525799 2024-12-15T04:38:47,883 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c171efca7fa402888c51683be27ebaf, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734237526414 2024-12-15T04:38:47,884 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 47549256eda94b7691b3cabb10fc83fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734237526421 2024-12-15T04:38:47,895 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:47,905 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:47,905 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-12-15T04:38:47,906 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#154 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:47,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:47,907 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:38:47,907 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1ff0765b4dc74dc2870422b580482892 is 50, key is test_row_0/B:col10/1734237526525/Put/seqid=0 2024-12-15T04:38:47,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:47,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:47,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:47,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:47,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:47,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:47,912 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121553534b67327e4f7888d52bd484ee9b0b_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:47,913 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121553534b67327e4f7888d52bd484ee9b0b_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:47,914 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121553534b67327e4f7888d52bd484ee9b0b_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:47,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742010_1186 (size=12663) 2024-12-15T04:38:47,923 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1ff0765b4dc74dc2870422b580482892 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1ff0765b4dc74dc2870422b580482892 2024-12-15T04:38:47,930 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into 1ff0765b4dc74dc2870422b580482892(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:47,930 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:47,931 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237527880; duration=0sec 2024-12-15T04:38:47,931 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:47,931 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:47,931 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:47,932 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:47,932 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:47,933 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:47,933 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/aa62b59d58b344e487a8c31aa383c004, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3e50d4bbf5724fb38cbcd3eadf1947dc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1fdf7f1ec255427bad313cfb5f566d47] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.0 K 2024-12-15T04:38:47,933 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting aa62b59d58b344e487a8c31aa383c004, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237525110 2024-12-15T04:38:47,934 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e50d4bbf5724fb38cbcd3eadf1947dc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1734237525799 2024-12-15T04:38:47,934 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fdf7f1ec255427bad313cfb5f566d47, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734237526421 2024-12-15T04:38:47,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412159cd634d2dd3a44d6b7060f56cc242783_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237526541/Put/seqid=0 2024-12-15T04:38:47,949 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:47,950 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/8ccdaa0fd1c14116b169b120e8ff2b9d is 50, key is test_row_0/C:col10/1734237526525/Put/seqid=0 2024-12-15T04:38:47,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742011_1187 (size=4469) 2024-12-15T04:38:47,955 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#153 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:47,955 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b42c2011032e4147b0233ce575c22dab is 175, key is test_row_0/A:col10/1734237526525/Put/seqid=0 2024-12-15T04:38:47,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742013_1189 (size=12663) 2024-12-15T04:38:47,984 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/8ccdaa0fd1c14116b169b120e8ff2b9d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/8ccdaa0fd1c14116b169b120e8ff2b9d 2024-12-15T04:38:47,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742014_1190 (size=31617) 2024-12-15T04:38:47,991 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into 8ccdaa0fd1c14116b169b120e8ff2b9d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:47,991 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:47,991 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237527880; duration=0sec 2024-12-15T04:38:47,992 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:47,992 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:47,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742012_1188 (size=12304) 2024-12-15T04:38:47,998 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b42c2011032e4147b0233ce575c22dab as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b42c2011032e4147b0233ce575c22dab 2024-12-15T04:38:47,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:48,004 INFO 
[RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412159cd634d2dd3a44d6b7060f56cc242783_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159cd634d2dd3a44d6b7060f56cc242783_574a05e47406cea06ff474376a420947 2024-12-15T04:38:48,004 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into b42c2011032e4147b0233ce575c22dab(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:48,005 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:48,005 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237527880; duration=0sec 2024-12-15T04:38:48,005 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:48,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/aeba37bec90c424d89eaf41f4a6df1f8, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:48,006 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:48,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/aeba37bec90c424d89eaf41f4a6df1f8 is 175, key is test_row_0/A:col10/1734237526541/Put/seqid=0 2024-12-15T04:38:48,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742015_1191 (size=31105) 2024-12-15T04:38:48,020 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/aeba37bec90c424d89eaf41f4a6df1f8 2024-12-15T04:38:48,029 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/a19a8dc1d94f44388b96692b53998f0e is 50, key is test_row_0/B:col10/1734237526541/Put/seqid=0 2024-12-15T04:38:48,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742016_1192 (size=12151) 2024-12-15T04:38:48,120 INFO [master/e56de37b85b3:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-15T04:38:48,120 INFO [master/e56de37b85b3:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-12-15T04:38:48,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-15T04:38:48,436 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/a19a8dc1d94f44388b96692b53998f0e 2024-12-15T04:38:48,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/63f6d90e50cd464b8d169e3368e55057 is 50, key is test_row_0/C:col10/1734237526541/Put/seqid=0 2024-12-15T04:38:48,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742017_1193 (size=12151) 2024-12-15T04:38:48,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:48,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:48,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237588667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237588668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237588668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237588668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237588669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237588773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237588773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237588773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,853 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/63f6d90e50cd464b8d169e3368e55057 2024-12-15T04:38:48,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/aeba37bec90c424d89eaf41f4a6df1f8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/aeba37bec90c424d89eaf41f4a6df1f8 2024-12-15T04:38:48,861 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/aeba37bec90c424d89eaf41f4a6df1f8, entries=150, sequenceid=239, filesize=30.4 K 2024-12-15T04:38:48,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/a19a8dc1d94f44388b96692b53998f0e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a19a8dc1d94f44388b96692b53998f0e 2024-12-15T04:38:48,866 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a19a8dc1d94f44388b96692b53998f0e, entries=150, sequenceid=239, filesize=11.9 K 2024-12-15T04:38:48,867 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/63f6d90e50cd464b8d169e3368e55057 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/63f6d90e50cd464b8d169e3368e55057 2024-12-15T04:38:48,871 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/63f6d90e50cd464b8d169e3368e55057, entries=150, sequenceid=239, filesize=11.9 K 2024-12-15T04:38:48,872 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 574a05e47406cea06ff474376a420947 in 966ms, sequenceid=239, compaction requested=false 2024-12-15T04:38:48,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:48,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:48,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-15T04:38:48,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-15T04:38:48,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-12-15T04:38:48,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7370 sec 2024-12-15T04:38:48,876 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.7400 sec 2024-12-15T04:38:48,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:48,976 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-15T04:38:48,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:48,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:48,976 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:48,977 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:48,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412153431b7768fcd4001b3476dc69bb3c85d_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237528975/Put/seqid=0 2024-12-15T04:38:48,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237588996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:48,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:48,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237588997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237588998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742018_1194 (size=12404) 2024-12-15T04:38:49,002 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:49,007 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412153431b7768fcd4001b3476dc69bb3c85d_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412153431b7768fcd4001b3476dc69bb3c85d_574a05e47406cea06ff474376a420947 2024-12-15T04:38:49,008 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/ac398a6b9a9642a6bdc57c7fc38667e2, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:49,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/ac398a6b9a9642a6bdc57c7fc38667e2 is 175, key is test_row_0/A:col10/1734237528975/Put/seqid=0 2024-12-15T04:38:49,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742019_1195 (size=31205) 2024-12-15T04:38:49,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237589099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237589099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,101 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237589101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-12-15T04:38:49,240 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-12-15T04:38:49,241 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:49,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-12-15T04:38:49,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T04:38:49,247 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:49,248 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:49,248 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:49,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237589301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,304 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237589302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237589304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T04:38:49,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-15T04:38:49,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:49,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,399 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,422 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=258, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/ac398a6b9a9642a6bdc57c7fc38667e2 2024-12-15T04:38:49,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1d239300ec4b43afacbba82d9c665a2e is 50, key is test_row_0/B:col10/1734237528975/Put/seqid=0 2024-12-15T04:38:49,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742020_1196 (size=12251) 2024-12-15T04:38:49,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T04:38:49,550 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,551 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-15T04:38:49,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:49,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,551 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237589605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237589606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:49,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237589606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,703 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-15T04:38:49,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:49,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,703 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,834 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1d239300ec4b43afacbba82d9c665a2e 2024-12-15T04:38:49,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T04:38:49,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1eea87604a934a358b4f608be12eb36c is 50, key is test_row_0/C:col10/1734237528975/Put/seqid=0 2024-12-15T04:38:49,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742021_1197 (size=12251) 2024-12-15T04:38:49,855 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:49,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-15T04:38:49,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:49,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:49,856 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:49,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:50,008 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-15T04:38:50,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:50,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:50,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:50,008 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:50,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:50,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:50,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:50,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237590110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:50,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237590111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:50,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237590112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,160 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-15T04:38:50,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:50,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:50,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:50,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:50,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:50,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:50,250 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1eea87604a934a358b4f608be12eb36c 2024-12-15T04:38:50,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/ac398a6b9a9642a6bdc57c7fc38667e2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ac398a6b9a9642a6bdc57c7fc38667e2 2024-12-15T04:38:50,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ac398a6b9a9642a6bdc57c7fc38667e2, entries=150, sequenceid=258, filesize=30.5 K 2024-12-15T04:38:50,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1d239300ec4b43afacbba82d9c665a2e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1d239300ec4b43afacbba82d9c665a2e 2024-12-15T04:38:50,262 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1d239300ec4b43afacbba82d9c665a2e, entries=150, sequenceid=258, filesize=12.0 K 2024-12-15T04:38:50,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1eea87604a934a358b4f608be12eb36c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1eea87604a934a358b4f608be12eb36c 2024-12-15T04:38:50,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1eea87604a934a358b4f608be12eb36c, entries=150, sequenceid=258, filesize=12.0 K 2024-12-15T04:38:50,268 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 574a05e47406cea06ff474376a420947 in 1292ms, sequenceid=258, compaction requested=true 2024-12-15T04:38:50,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:50,268 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:50,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:50,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:50,269 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:50,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:50,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:50,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:50,269 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:50,270 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93927 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:50,270 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:50,270 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:50,270 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b42c2011032e4147b0233ce575c22dab, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/aeba37bec90c424d89eaf41f4a6df1f8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ac398a6b9a9642a6bdc57c7fc38667e2] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=91.7 K 2024-12-15T04:38:50,270 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:50,270 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b42c2011032e4147b0233ce575c22dab, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/aeba37bec90c424d89eaf41f4a6df1f8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ac398a6b9a9642a6bdc57c7fc38667e2] 2024-12-15T04:38:50,270 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:50,271 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:50,271 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:50,271 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1ff0765b4dc74dc2870422b580482892, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a19a8dc1d94f44388b96692b53998f0e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1d239300ec4b43afacbba82d9c665a2e] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.2 K 2024-12-15T04:38:50,271 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b42c2011032e4147b0233ce575c22dab, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734237526421 2024-12-15T04:38:50,271 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ff0765b4dc74dc2870422b580482892, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734237526421 2024-12-15T04:38:50,271 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting aeba37bec90c424d89eaf41f4a6df1f8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734237526536 2024-12-15T04:38:50,271 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac398a6b9a9642a6bdc57c7fc38667e2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237528667 2024-12-15T04:38:50,271 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a19a8dc1d94f44388b96692b53998f0e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734237526536 2024-12-15T04:38:50,272 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d239300ec4b43afacbba82d9c665a2e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237528667 2024-12-15T04:38:50,278 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#162 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:50,278 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/bbaec87a1a0043b0b161e45e8bd84c47 is 50, key is test_row_0/B:col10/1734237528975/Put/seqid=0 2024-12-15T04:38:50,284 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:50,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742022_1198 (size=12865) 2024-12-15T04:38:50,287 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121559089497c31047daa4f4d5706191d1af_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:50,288 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121559089497c31047daa4f4d5706191d1af_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:50,288 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121559089497c31047daa4f4d5706191d1af_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:50,294 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/bbaec87a1a0043b0b161e45e8bd84c47 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bbaec87a1a0043b0b161e45e8bd84c47 2024-12-15T04:38:50,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742023_1199 (size=4469) 2024-12-15T04:38:50,304 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into bbaec87a1a0043b0b161e45e8bd84c47(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:50,304 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:50,304 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237530269; duration=0sec 2024-12-15T04:38:50,304 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:50,304 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:50,304 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:50,305 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37065 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:50,305 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:50,305 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:50,305 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/8ccdaa0fd1c14116b169b120e8ff2b9d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/63f6d90e50cd464b8d169e3368e55057, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1eea87604a934a358b4f608be12eb36c] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.2 K 2024-12-15T04:38:50,307 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ccdaa0fd1c14116b169b120e8ff2b9d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1734237526421 2024-12-15T04:38:50,307 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 63f6d90e50cd464b8d169e3368e55057, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1734237526536 2024-12-15T04:38:50,307 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1eea87604a934a358b4f608be12eb36c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237528667 2024-12-15T04:38:50,313 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
574a05e47406cea06ff474376a420947#C#compaction#164 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:50,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,314 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/c2d2c6cd1aa544c8be765c0d91fe66d9 is 50, key is test_row_0/C:col10/1734237528975/Put/seqid=0 2024-12-15T04:38:50,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-12-15T04:38:50,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:50,314 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-15T04:38:50,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:50,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:50,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:50,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:50,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:50,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:50,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742024_1200 (size=12865) 2024-12-15T04:38:50,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215f3764152c57d4b26b9639856cb929005_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237528996/Put/seqid=0 2024-12-15T04:38:50,324 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/c2d2c6cd1aa544c8be765c0d91fe66d9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/c2d2c6cd1aa544c8be765c0d91fe66d9 2024-12-15T04:38:50,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742025_1201 (size=12454) 2024-12-15T04:38:50,333 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into c2d2c6cd1aa544c8be765c0d91fe66d9(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:50,333 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:50,333 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237530269; duration=0sec 2024-12-15T04:38:50,333 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:50,333 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:50,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:50,337 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215f3764152c57d4b26b9639856cb929005_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215f3764152c57d4b26b9639856cb929005_574a05e47406cea06ff474376a420947 2024-12-15T04:38:50,338 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/0760db6fb5a340f6b97cdd4cb6d1af6e, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:50,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/0760db6fb5a340f6b97cdd4cb6d1af6e is 175, key 
is test_row_0/A:col10/1734237528996/Put/seqid=0 2024-12-15T04:38:50,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742026_1202 (size=31255) 2024-12-15T04:38:50,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T04:38:50,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:50,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:50,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:50,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237590695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,696 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#163 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:50,697 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/a9364609ee4d4bf1bae81a801689c01b is 175, key is test_row_0/A:col10/1734237528975/Put/seqid=0 2024-12-15T04:38:50,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:50,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237590696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742027_1203 (size=31819) 2024-12-15T04:38:50,744 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=280, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/0760db6fb5a340f6b97cdd4cb6d1af6e 2024-12-15T04:38:50,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/66dc35692206408782f9b0c816349c05 is 50, key is test_row_0/B:col10/1734237528996/Put/seqid=0 2024-12-15T04:38:50,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742028_1204 (size=12301) 2024-12-15T04:38:50,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:50,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237590797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:50,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:50,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237590798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,002 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,002 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237591001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237591001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,115 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237591115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237591116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,120 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/a9364609ee4d4bf1bae81a801689c01b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/a9364609ee4d4bf1bae81a801689c01b 2024-12-15T04:38:51,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237591119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,125 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into a9364609ee4d4bf1bae81a801689c01b(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:51,125 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:51,125 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237530268; duration=0sec 2024-12-15T04:38:51,125 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:51,125 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:51,170 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/66dc35692206408782f9b0c816349c05 2024-12-15T04:38:51,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/7c41a3fe5e814aa3bb71f98762a5f9b9 is 50, key is test_row_0/C:col10/1734237528996/Put/seqid=0 2024-12-15T04:38:51,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742029_1205 (size=12301) 2024-12-15T04:38:51,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237591303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237591305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T04:38:51,580 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=280 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/7c41a3fe5e814aa3bb71f98762a5f9b9 2024-12-15T04:38:51,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/0760db6fb5a340f6b97cdd4cb6d1af6e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/0760db6fb5a340f6b97cdd4cb6d1af6e 2024-12-15T04:38:51,589 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/0760db6fb5a340f6b97cdd4cb6d1af6e, entries=150, sequenceid=280, filesize=30.5 K 2024-12-15T04:38:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/66dc35692206408782f9b0c816349c05 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/66dc35692206408782f9b0c816349c05 2024-12-15T04:38:51,594 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/66dc35692206408782f9b0c816349c05, entries=150, sequenceid=280, filesize=12.0 K 2024-12-15T04:38:51,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/7c41a3fe5e814aa3bb71f98762a5f9b9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7c41a3fe5e814aa3bb71f98762a5f9b9 2024-12-15T04:38:51,599 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7c41a3fe5e814aa3bb71f98762a5f9b9, entries=150, sequenceid=280, filesize=12.0 K 2024-12-15T04:38:51,600 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 574a05e47406cea06ff474376a420947 in 1286ms, sequenceid=280, compaction requested=false 2024-12-15T04:38:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:51,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:51,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-12-15T04:38:51,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-12-15T04:38:51,603 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-15T04:38:51,603 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3530 sec 2024-12-15T04:38:51,604 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 2.3620 sec 2024-12-15T04:38:51,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:51,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-15T04:38:51,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:51,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:51,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:51,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:51,813 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:51,813 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:51,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215b03ba245080a43468377add7a6e79ef6_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237531811/Put/seqid=0 2024-12-15T04:38:51,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237591830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237591832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742030_1206 (size=17534) 2024-12-15T04:38:51,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237591933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:51,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:51,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237591934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:52,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:52,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237592134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:52,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:52,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237592136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:52,237 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:52,241 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215b03ba245080a43468377add7a6e79ef6_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b03ba245080a43468377add7a6e79ef6_574a05e47406cea06ff474376a420947 2024-12-15T04:38:52,241 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b67bea2dcaf2441b8783ad25ab06f031, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:52,242 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b67bea2dcaf2441b8783ad25ab06f031 is 175, key is test_row_0/A:col10/1734237531811/Put/seqid=0 2024-12-15T04:38:52,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742031_1207 (size=48639) 2024-12-15T04:38:52,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:52,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237592437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:52,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237592438, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:52,646 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=299, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b67bea2dcaf2441b8783ad25ab06f031 2024-12-15T04:38:52,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c5be3f227128493bb24d92fb9f52c0b2 is 50, key is test_row_0/B:col10/1734237531811/Put/seqid=0 2024-12-15T04:38:52,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742032_1208 (size=12301) 2024-12-15T04:38:52,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:52,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237592940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:52,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:52,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237592941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:53,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c5be3f227128493bb24d92fb9f52c0b2 2024-12-15T04:38:53,064 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/db05a77cd46a41e5b5b1792c1fb734ab is 50, key is test_row_0/C:col10/1734237531811/Put/seqid=0 2024-12-15T04:38:53,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742033_1209 (size=12301) 2024-12-15T04:38:53,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:53,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237593124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:53,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:53,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237593124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:53,125 DEBUG [Thread-631 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:53,125 DEBUG [Thread-625 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:53,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:53,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237593126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:53,126 DEBUG [Thread-633 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4128 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-15T04:38:53,346 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-12-15T04:38:53,347 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-12-15T04:38:53,348 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-15T04:38:53,349 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:53,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-15T04:38:53,468 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/db05a77cd46a41e5b5b1792c1fb734ab 2024-12-15T04:38:53,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/b67bea2dcaf2441b8783ad25ab06f031 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b67bea2dcaf2441b8783ad25ab06f031 2024-12-15T04:38:53,476 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b67bea2dcaf2441b8783ad25ab06f031, entries=250, sequenceid=299, filesize=47.5 K 2024-12-15T04:38:53,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c5be3f227128493bb24d92fb9f52c0b2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5be3f227128493bb24d92fb9f52c0b2 2024-12-15T04:38:53,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5be3f227128493bb24d92fb9f52c0b2, entries=150, sequenceid=299, filesize=12.0 K 2024-12-15T04:38:53,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/db05a77cd46a41e5b5b1792c1fb734ab as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/db05a77cd46a41e5b5b1792c1fb734ab 2024-12-15T04:38:53,486 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/db05a77cd46a41e5b5b1792c1fb734ab, entries=150, sequenceid=299, filesize=12.0 K 2024-12-15T04:38:53,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 574a05e47406cea06ff474376a420947 in 1675ms, sequenceid=299, compaction requested=true 2024-12-15T04:38:53,487 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:53,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:53,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:53,487 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:53,487 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:53,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:53,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:53,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:53,487 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:53,488 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:53,488 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111713 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:53,488 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:53,488 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:53,489 INFO 
[RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:53,489 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:53,489 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bbaec87a1a0043b0b161e45e8bd84c47, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/66dc35692206408782f9b0c816349c05, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5be3f227128493bb24d92fb9f52c0b2] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.6 K 2024-12-15T04:38:53,489 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/a9364609ee4d4bf1bae81a801689c01b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/0760db6fb5a340f6b97cdd4cb6d1af6e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b67bea2dcaf2441b8783ad25ab06f031] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=109.1 K 2024-12-15T04:38:53,489 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:53,489 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/a9364609ee4d4bf1bae81a801689c01b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/0760db6fb5a340f6b97cdd4cb6d1af6e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b67bea2dcaf2441b8783ad25ab06f031] 2024-12-15T04:38:53,489 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting bbaec87a1a0043b0b161e45e8bd84c47, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237528667 2024-12-15T04:38:53,489 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9364609ee4d4bf1bae81a801689c01b, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237528667 2024-12-15T04:38:53,489 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 66dc35692206408782f9b0c816349c05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734237528990 2024-12-15T04:38:53,489 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0760db6fb5a340f6b97cdd4cb6d1af6e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734237528990 2024-12-15T04:38:53,490 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c5be3f227128493bb24d92fb9f52c0b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237530692 2024-12-15T04:38:53,490 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b67bea2dcaf2441b8783ad25ab06f031, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237530692 2024-12-15T04:38:53,495 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:53,496 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#171 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:53,496 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/006dc74068964cb894984f57b4128d4d is 50, key is test_row_0/B:col10/1734237531811/Put/seqid=0 2024-12-15T04:38:53,497 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241215c5401396a79a40f590b058b7e566ba77_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:53,500 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241215c5401396a79a40f590b058b7e566ba77_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:53,500 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:53,500 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215c5401396a79a40f590b058b7e566ba77_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:53,500 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-12-15T04:38:53,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:53,501 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-15T04:38:53,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:53,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:53,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:53,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:53,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:53,501 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:53,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742034_1210 (size=13017) 2024-12-15T04:38:53,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121505f04d9eafba459aa596ab54a269feee_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237531826/Put/seqid=0 2024-12-15T04:38:53,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742035_1211 (size=4469) 2024-12-15T04:38:53,530 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#172 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:53,530 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/6eab12e14b4a415cbd45b1ce06e03789 is 175, key is test_row_0/A:col10/1734237531811/Put/seqid=0 2024-12-15T04:38:53,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742036_1212 (size=12454) 2024-12-15T04:38:53,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742037_1213 (size=31971) 2024-12-15T04:38:53,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-15T04:38:53,918 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/006dc74068964cb894984f57b4128d4d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/006dc74068964cb894984f57b4128d4d 2024-12-15T04:38:53,927 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into 006dc74068964cb894984f57b4128d4d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:53,927 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:53,927 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237533487; duration=0sec 2024-12-15T04:38:53,928 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:53,928 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:53,928 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:53,929 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:53,929 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:53,929 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:53,929 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/c2d2c6cd1aa544c8be765c0d91fe66d9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7c41a3fe5e814aa3bb71f98762a5f9b9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/db05a77cd46a41e5b5b1792c1fb734ab] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.6 K 2024-12-15T04:38:53,929 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c2d2c6cd1aa544c8be765c0d91fe66d9, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237528667 2024-12-15T04:38:53,930 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c41a3fe5e814aa3bb71f98762a5f9b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=280, earliestPutTs=1734237528990 2024-12-15T04:38:53,930 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting db05a77cd46a41e5b5b1792c1fb734ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237530692 2024-12-15T04:38:53,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:53,937 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#174 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:53,937 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1570ccfa3f434c9b8fd25eb804d8fadb is 50, key is test_row_0/C:col10/1734237531811/Put/seqid=0 2024-12-15T04:38:53,941 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121505f04d9eafba459aa596ab54a269feee_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121505f04d9eafba459aa596ab54a269feee_574a05e47406cea06ff474376a420947 2024-12-15T04:38:53,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/89f6cebc0b4a40459426a0362578a5ee, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:53,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/89f6cebc0b4a40459426a0362578a5ee is 175, key is test_row_0/A:col10/1734237531826/Put/seqid=0 2024-12-15T04:38:53,944 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/6eab12e14b4a415cbd45b1ce06e03789 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6eab12e14b4a415cbd45b1ce06e03789 2024-12-15T04:38:53,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:53,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
as already flushing 2024-12-15T04:38:53,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742038_1214 (size=13017) 2024-12-15T04:38:53,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-15T04:38:53,952 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into 6eab12e14b4a415cbd45b1ce06e03789(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:53,952 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:53,952 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237533487; duration=0sec 2024-12-15T04:38:53,952 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:53,952 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:53,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742039_1215 (size=31255) 2024-12-15T04:38:53,972 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1570ccfa3f434c9b8fd25eb804d8fadb as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1570ccfa3f434c9b8fd25eb804d8fadb 2024-12-15T04:38:53,978 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into 1570ccfa3f434c9b8fd25eb804d8fadb(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:53,978 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:53,978 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237533487; duration=0sec 2024-12-15T04:38:53,978 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:53,978 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:53,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:53,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237593983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:53,985 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:53,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237593984, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:54,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:54,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237594087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:54,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:54,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237594087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:54,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:54,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237594289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:54,289 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:54,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237594289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:54,355 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=318, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/89f6cebc0b4a40459426a0362578a5ee 2024-12-15T04:38:54,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/6fe22180a0504855947ca720f4af524e is 50, key is test_row_0/B:col10/1734237531826/Put/seqid=0 2024-12-15T04:38:54,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742040_1216 (size=12301) 2024-12-15T04:38:54,369 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/6fe22180a0504855947ca720f4af524e 2024-12-15T04:38:54,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1172ba09c8cd47f8a70edf4bdfc315d2 is 50, key is 
test_row_0/C:col10/1734237531826/Put/seqid=0 2024-12-15T04:38:54,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742041_1217 (size=12301) 2024-12-15T04:38:54,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-15T04:38:54,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:54,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237594590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:54,592 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:54,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237594591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:54,787 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1172ba09c8cd47f8a70edf4bdfc315d2 2024-12-15T04:38:54,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/89f6cebc0b4a40459426a0362578a5ee as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/89f6cebc0b4a40459426a0362578a5ee 2024-12-15T04:38:54,796 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/89f6cebc0b4a40459426a0362578a5ee, entries=150, sequenceid=318, filesize=30.5 K 2024-12-15T04:38:54,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/6fe22180a0504855947ca720f4af524e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/6fe22180a0504855947ca720f4af524e 2024-12-15T04:38:54,800 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/6fe22180a0504855947ca720f4af524e, entries=150, sequenceid=318, filesize=12.0 K 2024-12-15T04:38:54,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/1172ba09c8cd47f8a70edf4bdfc315d2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1172ba09c8cd47f8a70edf4bdfc315d2 2024-12-15T04:38:54,805 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1172ba09c8cd47f8a70edf4bdfc315d2, entries=150, sequenceid=318, filesize=12.0 K 2024-12-15T04:38:54,806 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 574a05e47406cea06ff474376a420947 in 1306ms, sequenceid=318, compaction requested=false 2024-12-15T04:38:54,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:54,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:54,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-12-15T04:38:54,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-12-15T04:38:54,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-12-15T04:38:54,808 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4580 sec 2024-12-15T04:38:54,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.4610 sec 2024-12-15T04:38:55,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:55,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-15T04:38:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:55,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:55,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412154aa560be307248df93b8a1d112df94c6_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237533983/Put/seqid=0 2024-12-15T04:38:55,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742042_1218 (size=12454) 2024-12-15T04:38:55,109 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:55,113 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412154aa560be307248df93b8a1d112df94c6_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412154aa560be307248df93b8a1d112df94c6_574a05e47406cea06ff474376a420947 2024-12-15T04:38:55,114 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/99589292bc464ac485542ed50a56f388, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:55,114 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/99589292bc464ac485542ed50a56f388 is 175, key is test_row_0/A:col10/1734237533983/Put/seqid=0 2024-12-15T04:38:55,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742043_1219 (size=31255) 2024-12-15T04:38:55,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237595116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,120 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=340, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/99589292bc464ac485542ed50a56f388 2024-12-15T04:38:55,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237595120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,127 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1b16eef30d8e40adab8ecd60600eec8e is 50, key is test_row_0/B:col10/1734237533983/Put/seqid=0 2024-12-15T04:38:55,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742044_1220 (size=12301) 2024-12-15T04:38:55,137 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1b16eef30d8e40adab8ecd60600eec8e 2024-12-15T04:38:55,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/50ada1ac711f4bd3be4f54c82bd1704c is 50, key is test_row_0/C:col10/1734237533983/Put/seqid=0 2024-12-15T04:38:55,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742045_1221 (size=12301) 2024-12-15T04:38:55,157 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/50ada1ac711f4bd3be4f54c82bd1704c 2024-12-15T04:38:55,162 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/99589292bc464ac485542ed50a56f388 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/99589292bc464ac485542ed50a56f388 2024-12-15T04:38:55,166 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/99589292bc464ac485542ed50a56f388, entries=150, sequenceid=340, filesize=30.5 K 2024-12-15T04:38:55,167 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/1b16eef30d8e40adab8ecd60600eec8e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1b16eef30d8e40adab8ecd60600eec8e 2024-12-15T04:38:55,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1b16eef30d8e40adab8ecd60600eec8e, entries=150, sequenceid=340, filesize=12.0 K 2024-12-15T04:38:55,172 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-15T04:38:55,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/50ada1ac711f4bd3be4f54c82bd1704c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/50ada1ac711f4bd3be4f54c82bd1704c 2024-12-15T04:38:55,178 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/50ada1ac711f4bd3be4f54c82bd1704c, entries=150, sequenceid=340, filesize=12.0 K 2024-12-15T04:38:55,179 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 574a05e47406cea06ff474376a420947 in 84ms, sequenceid=340, compaction requested=true 2024-12-15T04:38:55,179 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:55,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:55,179 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:55,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:55,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:55,179 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:55,179 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:55,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:55,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:55,180 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:55,180 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:55,180 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:55,180 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:55,180 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:55,180 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:55,180 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6eab12e14b4a415cbd45b1ce06e03789, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/89f6cebc0b4a40459426a0362578a5ee, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/99589292bc464ac485542ed50a56f388] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=92.3 K 2024-12-15T04:38:55,180 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/006dc74068964cb894984f57b4128d4d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/6fe22180a0504855947ca720f4af524e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1b16eef30d8e40adab8ecd60600eec8e] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.7 K 2024-12-15T04:38:55,180 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:55,181 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6eab12e14b4a415cbd45b1ce06e03789, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/89f6cebc0b4a40459426a0362578a5ee, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/99589292bc464ac485542ed50a56f388] 2024-12-15T04:38:55,181 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 006dc74068964cb894984f57b4128d4d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237530692 2024-12-15T04:38:55,181 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6eab12e14b4a415cbd45b1ce06e03789, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237530692 2024-12-15T04:38:55,181 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fe22180a0504855947ca720f4af524e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1734237531826 2024-12-15T04:38:55,181 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89f6cebc0b4a40459426a0362578a5ee, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1734237531826 2024-12-15T04:38:55,181 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b16eef30d8e40adab8ecd60600eec8e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734237533982 2024-12-15T04:38:55,181 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99589292bc464ac485542ed50a56f388, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734237533982 2024-12-15T04:38:55,187 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:55,188 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#180 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:55,188 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c5bb11933694429381af851b556733db is 50, key is test_row_0/B:col10/1734237533983/Put/seqid=0 2024-12-15T04:38:55,196 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121577f0d74a76fc460e99e349b58d19600e_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:55,197 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121577f0d74a76fc460e99e349b58d19600e_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:55,198 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121577f0d74a76fc460e99e349b58d19600e_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:55,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742046_1222 (size=13119) 2024-12-15T04:38:55,216 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c5bb11933694429381af851b556733db as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5bb11933694429381af851b556733db 2024-12-15T04:38:55,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742047_1223 (size=4469) 2024-12-15T04:38:55,221 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into c5bb11933694429381af851b556733db(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:55,221 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:55,221 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237535179; duration=0sec 2024-12-15T04:38:55,221 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:55,221 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:55,222 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:38:55,223 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:38:55,223 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:55,223 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:55,223 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1570ccfa3f434c9b8fd25eb804d8fadb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1172ba09c8cd47f8a70edf4bdfc315d2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/50ada1ac711f4bd3be4f54c82bd1704c] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=36.7 K 2024-12-15T04:38:55,223 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1570ccfa3f434c9b8fd25eb804d8fadb, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237530692 2024-12-15T04:38:55,224 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1172ba09c8cd47f8a70edf4bdfc315d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1734237531826 2024-12-15T04:38:55,224 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 50ada1ac711f4bd3be4f54c82bd1704c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734237533982 2024-12-15T04:38:55,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush 
requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:55,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-15T04:38:55,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:55,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:55,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:55,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:55,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:55,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:55,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121561ef916e0d484a80a7470021cf0524c8_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237535225/Put/seqid=0 2024-12-15T04:38:55,237 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#183 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:55,237 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/57be76efd37048caa681f21820930721 is 50, key is test_row_0/C:col10/1734237533983/Put/seqid=0 2024-12-15T04:38:55,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742048_1224 (size=17534) 2024-12-15T04:38:55,245 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:55,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237595248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237595250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,253 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121561ef916e0d484a80a7470021cf0524c8_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121561ef916e0d484a80a7470021cf0524c8_574a05e47406cea06ff474376a420947 2024-12-15T04:38:55,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742049_1225 (size=13119) 2024-12-15T04:38:55,256 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/de10048c258d4a58b9e56a358c0e4d7c, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:55,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/de10048c258d4a58b9e56a358c0e4d7c is 175, key is test_row_0/A:col10/1734237535225/Put/seqid=0 2024-12-15T04:38:55,262 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/57be76efd37048caa681f21820930721 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/57be76efd37048caa681f21820930721 2024-12-15T04:38:55,272 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into 57be76efd37048caa681f21820930721(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:55,272 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:55,272 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237535179; duration=0sec 2024-12-15T04:38:55,272 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:55,272 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:55,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742050_1226 (size=48639) 2024-12-15T04:38:55,278 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=360, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/de10048c258d4a58b9e56a358c0e4d7c 2024-12-15T04:38:55,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/167aad807fb449748292a3d6b592f5ba is 50, key is test_row_0/B:col10/1734237535225/Put/seqid=0 2024-12-15T04:38:55,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742051_1227 (size=12301) 2024-12-15T04:38:55,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237595351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237595353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-12-15T04:38:55,453 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-12-15T04:38:55,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:55,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-12-15T04:38:55,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-15T04:38:55,455 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:55,455 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:55,455 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:55,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237595553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-15T04:38:55,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237595558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,606 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,607 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-15T04:38:55,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:55,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:55,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:55,607 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:55,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:55,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:55,618 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#181 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:55,619 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/90edc218edc94c01a958b9ad9d50cc02 is 175, key is test_row_0/A:col10/1734237533983/Put/seqid=0 2024-12-15T04:38:55,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742052_1228 (size=32073) 2024-12-15T04:38:55,629 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/90edc218edc94c01a958b9ad9d50cc02 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/90edc218edc94c01a958b9ad9d50cc02 2024-12-15T04:38:55,634 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into 90edc218edc94c01a958b9ad9d50cc02(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:55,634 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:55,634 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237535179; duration=0sec 2024-12-15T04:38:55,634 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:55,634 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:38:55,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/167aad807fb449748292a3d6b592f5ba 2024-12-15T04:38:55,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/a0dbd19531124ad39e78d1d411d7db89 is 50, key is test_row_0/C:col10/1734237535225/Put/seqid=0 2024-12-15T04:38:55,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742053_1229 (size=12301) 2024-12-15T04:38:55,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-15T04:38:55,759 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,759 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-15T04:38:55,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:55,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:55,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:55,759 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:55,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:55,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:55,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237595857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:55,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237595863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,911 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:55,911 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-15T04:38:55,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:55,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:55,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:55,912 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:55,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:55,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:56,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-15T04:38:56,063 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:56,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-12-15T04:38:56,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:56,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:56,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:56,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:56,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:56,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:56,103 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/a0dbd19531124ad39e78d1d411d7db89 2024-12-15T04:38:56,108 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/de10048c258d4a58b9e56a358c0e4d7c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/de10048c258d4a58b9e56a358c0e4d7c 2024-12-15T04:38:56,111 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/de10048c258d4a58b9e56a358c0e4d7c, entries=250, sequenceid=360, filesize=47.5 K 2024-12-15T04:38:56,112 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/167aad807fb449748292a3d6b592f5ba as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/167aad807fb449748292a3d6b592f5ba 2024-12-15T04:38:56,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/167aad807fb449748292a3d6b592f5ba, entries=150, sequenceid=360, filesize=12.0 K 2024-12-15T04:38:56,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/a0dbd19531124ad39e78d1d411d7db89 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a0dbd19531124ad39e78d1d411d7db89 2024-12-15T04:38:56,120 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a0dbd19531124ad39e78d1d411d7db89, entries=150, sequenceid=360, filesize=12.0 K 2024-12-15T04:38:56,121 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 574a05e47406cea06ff474376a420947 in 894ms, sequenceid=360, compaction requested=false 2024-12-15T04:38:56,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:56,216 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:56,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=54 2024-12-15T04:38:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:56,217 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-15T04:38:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:56,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215ef9619968eeb43f490ec971f0fe280c1_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237535249/Put/seqid=0 2024-12-15T04:38:56,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742054_1230 (size=12454) 2024-12-15T04:38:56,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:56,231 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215ef9619968eeb43f490ec971f0fe280c1_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215ef9619968eeb43f490ec971f0fe280c1_574a05e47406cea06ff474376a420947 2024-12-15T04:38:56,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4fce53f9a55c4d3f862ab62759a5c4ec, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:56,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4fce53f9a55c4d3f862ab62759a5c4ec is 175, key is test_row_0/A:col10/1734237535249/Put/seqid=0 2024-12-15T04:38:56,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742055_1231 (size=31255) 2024-12-15T04:38:56,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:56,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:56,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237596400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:56,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:56,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237596400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:56,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:56,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237596504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:56,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237596504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:56,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-15T04:38:56,636 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=379, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4fce53f9a55c4d3f862ab62759a5c4ec 2024-12-15T04:38:56,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/d7ca3fbf568c4ce1af77a60ca90af05f is 50, key is test_row_0/B:col10/1734237535249/Put/seqid=0 2024-12-15T04:38:56,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742056_1232 (size=12301) 2024-12-15T04:38:56,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:56,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237596705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:56,706 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:56,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237596705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237597009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237597009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,053 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/d7ca3fbf568c4ce1af77a60ca90af05f 2024-12-15T04:38:57,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/91f109b5b8834ff9ada077780fb49875 is 50, key is test_row_0/C:col10/1734237535249/Put/seqid=0 2024-12-15T04:38:57,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742057_1233 (size=12301) 2024-12-15T04:38:57,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58208 deadline: 1734237597139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,142 DEBUG [Thread-633 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:57,146 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58172 deadline: 1734237597145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,147 DEBUG [Thread-625 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8150 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:57,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58154 deadline: 1734237597164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,166 DEBUG [Thread-631 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:38:57,486 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=379 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/91f109b5b8834ff9ada077780fb49875 2024-12-15T04:38:57,490 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/4fce53f9a55c4d3f862ab62759a5c4ec as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4fce53f9a55c4d3f862ab62759a5c4ec 2024-12-15T04:38:57,493 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4fce53f9a55c4d3f862ab62759a5c4ec, entries=150, sequenceid=379, filesize=30.5 K 2024-12-15T04:38:57,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/d7ca3fbf568c4ce1af77a60ca90af05f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/d7ca3fbf568c4ce1af77a60ca90af05f 2024-12-15T04:38:57,498 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/d7ca3fbf568c4ce1af77a60ca90af05f, entries=150, sequenceid=379, filesize=12.0 K 2024-12-15T04:38:57,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/91f109b5b8834ff9ada077780fb49875 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/91f109b5b8834ff9ada077780fb49875 2024-12-15T04:38:57,502 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/91f109b5b8834ff9ada077780fb49875, entries=150, sequenceid=379, filesize=12.0 K 2024-12-15T04:38:57,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] 
regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-15T04:38:57,503 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 574a05e47406cea06ff474376a420947 in 1286ms, sequenceid=379, compaction requested=true 2024-12-15T04:38:57,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:57,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:57,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-12-15T04:38:57,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-12-15T04:38:57,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-12-15T04:38:57,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0500 sec 2024-12-15T04:38:57,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.0520 sec 2024-12-15T04:38:57,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:57,517 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-15T04:38:57,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:57,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:57,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:57,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:57,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:57,518 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:57,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215b4a9dd405546408f8754a65b8e32b506_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237536381/Put/seqid=0 2024-12-15T04:38:57,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to 
blk_1073742058_1234 (size=14994) 2024-12-15T04:38:57,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237597534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237597535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-12-15T04:38:57,559 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-12-15T04:38:57,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:38:57,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-12-15T04:38:57,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T04:38:57,561 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:38:57,561 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:38:57,561 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:38:57,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237597637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237597637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T04:38:57,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:57,713 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:57,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:57,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:57,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237597838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:57,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237597838, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T04:38:57,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:57,865 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:57,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:57,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:57,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:57,865 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:57,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:57,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:57,929 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:57,932 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215b4a9dd405546408f8754a65b8e32b506_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b4a9dd405546408f8754a65b8e32b506_574a05e47406cea06ff474376a420947 2024-12-15T04:38:57,933 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/254a232b18d24ab091083c866a134f9e, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:57,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/254a232b18d24ab091083c866a134f9e is 175, key is test_row_0/A:col10/1734237536381/Put/seqid=0 2024-12-15T04:38:57,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742059_1235 (size=39949) 2024-12-15T04:38:58,017 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:58,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:58,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,018 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:58,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237598139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:58,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237598141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T04:38:58,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:58,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:58,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:58,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,323 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:58,338 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=400, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/254a232b18d24ab091083c866a134f9e 2024-12-15T04:38:58,344 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/43d8f0193b10418a95180cd06716931d is 50, key is test_row_0/B:col10/1734237536381/Put/seqid=0 2024-12-15T04:38:58,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742060_1236 (size=12301) 2024-12-15T04:38:58,475 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,475 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,476 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:38:58,476 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,627 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:58,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:58,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:58,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237598643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:58,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237598647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T04:38:58,748 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/43d8f0193b10418a95180cd06716931d 2024-12-15T04:38:58,755 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/44941bffdebb409ba34fa6bdfed01bc8 is 50, key is test_row_0/C:col10/1734237536381/Put/seqid=0 2024-12-15T04:38:58,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742061_1237 (size=12301) 2024-12-15T04:38:58,780 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:58,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:58,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:58,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,932 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:58,933 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:58,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:58,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:58,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:58,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:59,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:59,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:38:59,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:59,086 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:59,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:59,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:38:59,159 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/44941bffdebb409ba34fa6bdfed01bc8 2024-12-15T04:38:59,163 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/254a232b18d24ab091083c866a134f9e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/254a232b18d24ab091083c866a134f9e 2024-12-15T04:38:59,167 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/254a232b18d24ab091083c866a134f9e, entries=200, sequenceid=400, filesize=39.0 K 2024-12-15T04:38:59,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/43d8f0193b10418a95180cd06716931d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/43d8f0193b10418a95180cd06716931d 2024-12-15T04:38:59,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/43d8f0193b10418a95180cd06716931d, entries=150, 
sequenceid=400, filesize=12.0 K 2024-12-15T04:38:59,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/44941bffdebb409ba34fa6bdfed01bc8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/44941bffdebb409ba34fa6bdfed01bc8 2024-12-15T04:38:59,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/44941bffdebb409ba34fa6bdfed01bc8, entries=150, sequenceid=400, filesize=12.0 K 2024-12-15T04:38:59,177 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 574a05e47406cea06ff474376a420947 in 1660ms, sequenceid=400, compaction requested=true 2024-12-15T04:38:59,177 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:59,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:38:59,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:59,177 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:59,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:38:59,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:59,177 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:59,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:38:59,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:59,178 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:59,178 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 151916 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:59,179 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] 
regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:38:59,179 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:38:59,179 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:59,179 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:59,179 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5bb11933694429381af851b556733db, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/167aad807fb449748292a3d6b592f5ba, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/d7ca3fbf568c4ce1af77a60ca90af05f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/43d8f0193b10418a95180cd06716931d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=48.8 K 2024-12-15T04:38:59,179 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/90edc218edc94c01a958b9ad9d50cc02, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/de10048c258d4a58b9e56a358c0e4d7c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4fce53f9a55c4d3f862ab62759a5c4ec, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/254a232b18d24ab091083c866a134f9e] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=148.4 K 2024-12-15T04:38:59,179 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:38:59,179 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/90edc218edc94c01a958b9ad9d50cc02, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/de10048c258d4a58b9e56a358c0e4d7c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4fce53f9a55c4d3f862ab62759a5c4ec, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/254a232b18d24ab091083c866a134f9e] 2024-12-15T04:38:59,179 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90edc218edc94c01a958b9ad9d50cc02, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734237533982 2024-12-15T04:38:59,179 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c5bb11933694429381af851b556733db, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734237533982 2024-12-15T04:38:59,180 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting de10048c258d4a58b9e56a358c0e4d7c, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1734237535107 2024-12-15T04:38:59,180 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 167aad807fb449748292a3d6b592f5ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1734237535115 2024-12-15T04:38:59,180 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d7ca3fbf568c4ce1af77a60ca90af05f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734237535246 2024-12-15T04:38:59,180 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fce53f9a55c4d3f862ab62759a5c4ec, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734237535246 2024-12-15T04:38:59,180 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 43d8f0193b10418a95180cd06716931d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734237536381 2024-12-15T04:38:59,180 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 254a232b18d24ab091083c866a134f9e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734237536380 2024-12-15T04:38:59,186 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:59,188 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121534f433749ff146bfbe065a27df6ae676_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:59,188 INFO 
[RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#193 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:59,189 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/50bb9b1189be419ea786321289b19d6a is 50, key is test_row_0/B:col10/1734237536381/Put/seqid=0 2024-12-15T04:38:59,191 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121534f433749ff146bfbe065a27df6ae676_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:59,191 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121534f433749ff146bfbe065a27df6ae676_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:59,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742062_1238 (size=13255) 2024-12-15T04:38:59,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742063_1239 (size=4469) 2024-12-15T04:38:59,237 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:38:59,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-12-15T04:38:59,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:59,238 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:38:59,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:38:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:38:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:38:59,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:38:59,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412159bf2393c545040128e3fedc1c818519f_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237537534/Put/seqid=0 2024-12-15T04:38:59,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742064_1240 (size=12454) 2024-12-15T04:38:59,600 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/50bb9b1189be419ea786321289b19d6a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/50bb9b1189be419ea786321289b19d6a 2024-12-15T04:38:59,604 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into 50bb9b1189be419ea786321289b19d6a(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:38:59,604 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:59,604 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=12, startTime=1734237539177; duration=0sec 2024-12-15T04:38:59,604 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:38:59,605 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:38:59,605 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:38:59,606 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#A#compaction#192 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:59,606 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50022 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:38:59,606 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:38:59,606 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:38:59,606 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/57be76efd37048caa681f21820930721, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a0dbd19531124ad39e78d1d411d7db89, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/91f109b5b8834ff9ada077780fb49875, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/44941bffdebb409ba34fa6bdfed01bc8] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=48.8 K 2024-12-15T04:38:59,606 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/ef6226a79b494154866c55edd4cd431e is 175, key is test_row_0/A:col10/1734237536381/Put/seqid=0 2024-12-15T04:38:59,607 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 57be76efd37048caa681f21820930721, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=340, earliestPutTs=1734237533982 2024-12-15T04:38:59,607 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a0dbd19531124ad39e78d1d411d7db89, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1734237535115 2024-12-15T04:38:59,608 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 91f109b5b8834ff9ada077780fb49875, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=379, earliestPutTs=1734237535246 2024-12-15T04:38:59,608 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 44941bffdebb409ba34fa6bdfed01bc8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734237536381 2024-12-15T04:38:59,623 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:38:59,624 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/bbf6c79ed3954ec99aa3d602c6f8bdad is 50, key is test_row_0/C:col10/1734237536381/Put/seqid=0 2024-12-15T04:38:59,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742065_1241 (size=32209) 2024-12-15T04:38:59,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742066_1242 (size=13255) 2024-12-15T04:38:59,634 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/bbf6c79ed3954ec99aa3d602c6f8bdad as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/bbf6c79ed3954ec99aa3d602c6f8bdad 2024-12-15T04:38:59,638 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into bbf6c79ed3954ec99aa3d602c6f8bdad(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:38:59,638 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:38:59,638 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=12, startTime=1734237539177; duration=0sec 2024-12-15T04:38:59,639 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:38:59,639 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:38:59,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:38:59,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
as already flushing 2024-12-15T04:38:59,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:38:59,660 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412159bf2393c545040128e3fedc1c818519f_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159bf2393c545040128e3fedc1c818519f_574a05e47406cea06ff474376a420947 2024-12-15T04:38:59,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/df9858aad69a412dac01addd173bf5f0, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:38:59,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/df9858aad69a412dac01addd173bf5f0 is 175, key is test_row_0/A:col10/1734237537534/Put/seqid=0 2024-12-15T04:38:59,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T04:38:59,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742067_1243 (size=31255) 2024-12-15T04:38:59,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:59,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237599670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:59,672 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237599670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:59,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237599771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:59,774 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237599773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:59,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237599973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:38:59,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:38:59,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237599976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:00,029 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/ef6226a79b494154866c55edd4cd431e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ef6226a79b494154866c55edd4cd431e 2024-12-15T04:39:00,033 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into ef6226a79b494154866c55edd4cd431e(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:00,033 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:00,033 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=12, startTime=1734237539177; duration=0sec 2024-12-15T04:39:00,033 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:00,033 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:39:00,066 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=415, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/df9858aad69a412dac01addd173bf5f0 2024-12-15T04:39:00,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c1ec1d4dec4d4fefbb1b67b821e1b08d is 50, key is test_row_0/B:col10/1734237537534/Put/seqid=0 2024-12-15T04:39:00,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742068_1244 (size=12301) 2024-12-15T04:39:00,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:00,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237600274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:00,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:00,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237600279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:00,476 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c1ec1d4dec4d4fefbb1b67b821e1b08d 2024-12-15T04:39:00,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/a77a260555a94f708346a7700b59c587 is 50, key is test_row_0/C:col10/1734237537534/Put/seqid=0 2024-12-15T04:39:00,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742069_1245 (size=12301) 2024-12-15T04:39:00,540 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T04:39:00,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:00,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58220 deadline: 1734237600776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:00,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:00,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58182 deadline: 1734237600783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:00,886 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/a77a260555a94f708346a7700b59c587 2024-12-15T04:39:00,890 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/df9858aad69a412dac01addd173bf5f0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/df9858aad69a412dac01addd173bf5f0 2024-12-15T04:39:00,893 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/df9858aad69a412dac01addd173bf5f0, entries=150, sequenceid=415, filesize=30.5 K 2024-12-15T04:39:00,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/c1ec1d4dec4d4fefbb1b67b821e1b08d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c1ec1d4dec4d4fefbb1b67b821e1b08d 2024-12-15T04:39:00,898 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c1ec1d4dec4d4fefbb1b67b821e1b08d, entries=150, sequenceid=415, filesize=12.0 K 2024-12-15T04:39:00,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/a77a260555a94f708346a7700b59c587 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a77a260555a94f708346a7700b59c587 2024-12-15T04:39:00,902 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a77a260555a94f708346a7700b59c587, entries=150, sequenceid=415, filesize=12.0 K 2024-12-15T04:39:00,903 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 574a05e47406cea06ff474376a420947 in 1665ms, sequenceid=415, compaction requested=false 2024-12-15T04:39:00,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:00,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:00,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-12-15T04:39:00,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-12-15T04:39:00,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-12-15T04:39:00,905 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.3430 sec 2024-12-15T04:39:00,906 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 3.3460 sec 2024-12-15T04:39:01,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-15T04:39:01,665 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-12-15T04:39:01,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-12-15T04:39:01,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-15T04:39:01,667 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:01,667 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:01,668 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:01,701 DEBUG [Thread-638 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08ba8425 to 127.0.0.1:55935 2024-12-15T04:39:01,701 DEBUG [Thread-638 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:01,702 DEBUG [Thread-636 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1a52344f to 127.0.0.1:55935 2024-12-15T04:39:01,702 DEBUG [Thread-636 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:01,702 DEBUG [Thread-642 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x063e87c8 to 127.0.0.1:55935 2024-12-15T04:39:01,703 DEBUG [Thread-642 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:01,703 DEBUG [Thread-640 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7af61386 to 127.0.0.1:55935 2024-12-15T04:39:01,703 DEBUG [Thread-640 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:01,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-15T04:39:01,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 574a05e47406cea06ff474376a420947 2024-12-15T04:39:01,789 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-15T04:39:01,790 DEBUG [Thread-627 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ebda6ad to 127.0.0.1:55935 2024-12-15T04:39:01,790 DEBUG [Thread-627 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:01,790 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:39:01,791 DEBUG [Thread-629 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x505d5ccd to 127.0.0.1:55935 2024-12-15T04:39:01,791 DEBUG [Thread-629 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:01,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:01,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:39:01,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:01,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:39:01,792 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:01,800 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215965149fec176478aa9386e0f7478d069_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237541788/Put/seqid=0 2024-12-15T04:39:01,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742070_1246 (size=12454) 2024-12-15T04:39:01,819 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:01,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:01,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:01,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:01,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:01,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:01,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:01,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:01,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-15T04:39:01,972 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:01,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:01,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:01,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:01,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:01,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:01,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:01,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,128 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:02,129 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:39:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:02,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:02,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:02,207 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:02,218 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215965149fec176478aa9386e0f7478d069_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215965149fec176478aa9386e0f7478d069_574a05e47406cea06ff474376a420947 2024-12-15T04:39:02,221 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1e4068d71d1e4b89b70a7ecdde7be34b, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:39:02,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1e4068d71d1e4b89b70a7ecdde7be34b is 175, key is test_row_0/A:col10/1734237541788/Put/seqid=0 2024-12-15T04:39:02,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742071_1247 (size=31255) 2024-12-15T04:39:02,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-15T04:39:02,285 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:02,286 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:02,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:02,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,286 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,438 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:02,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:02,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:02,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,440 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,594 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:02,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:02,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:02,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,596 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,627 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=441, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1e4068d71d1e4b89b70a7ecdde7be34b 2024-12-15T04:39:02,640 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/e61a19c693e244d6940c5f603d0e3734 is 50, key is test_row_0/B:col10/1734237541788/Put/seqid=0 2024-12-15T04:39:02,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742072_1248 (size=12301) 2024-12-15T04:39:02,751 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:02,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:02,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
as already flushing 2024-12-15T04:39:02,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,752 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-15T04:39:02,905 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:02,906 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:02,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:02,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:02,907 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:02,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/e61a19c693e244d6940c5f603d0e3734 2024-12-15T04:39:03,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/41726d462d17422ea0529e968ff93e8d is 50, key is test_row_0/C:col10/1734237541788/Put/seqid=0 2024-12-15T04:39:03,061 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:03,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:39:03,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742073_1249 (size=12301) 2024-12-15T04:39:03,215 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:03,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:03,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:03,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,217 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,372 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:03,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:03,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. as already flushing 2024-12-15T04:39:03,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:03,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=441 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/41726d462d17422ea0529e968ff93e8d 2024-12-15T04:39:03,474 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/1e4068d71d1e4b89b70a7ecdde7be34b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1e4068d71d1e4b89b70a7ecdde7be34b 2024-12-15T04:39:03,479 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1e4068d71d1e4b89b70a7ecdde7be34b, entries=150, sequenceid=441, filesize=30.5 K 2024-12-15T04:39:03,480 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/e61a19c693e244d6940c5f603d0e3734 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/e61a19c693e244d6940c5f603d0e3734 2024-12-15T04:39:03,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/e61a19c693e244d6940c5f603d0e3734, entries=150, 
sequenceid=441, filesize=12.0 K 2024-12-15T04:39:03,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/41726d462d17422ea0529e968ff93e8d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/41726d462d17422ea0529e968ff93e8d 2024-12-15T04:39:03,491 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/41726d462d17422ea0529e968ff93e8d, entries=150, sequenceid=441, filesize=12.0 K 2024-12-15T04:39:03,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 574a05e47406cea06ff474376a420947 in 1703ms, sequenceid=441, compaction requested=true 2024-12-15T04:39:03,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:03,493 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:03,493 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 574a05e47406cea06ff474376a420947:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:03,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:03,494 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94719 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:03,494 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:03,494 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 
574a05e47406cea06ff474376a420947/A is initiating minor compaction (all files) 2024-12-15T04:39:03,494 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/B is initiating minor compaction (all files) 2024-12-15T04:39:03,494 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/A in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,494 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/B in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,494 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/50bb9b1189be419ea786321289b19d6a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c1ec1d4dec4d4fefbb1b67b821e1b08d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/e61a19c693e244d6940c5f603d0e3734] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=37.0 K 2024-12-15T04:39:03,494 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ef6226a79b494154866c55edd4cd431e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/df9858aad69a412dac01addd173bf5f0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1e4068d71d1e4b89b70a7ecdde7be34b] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=92.5 K 2024-12-15T04:39:03,494 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,494 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ef6226a79b494154866c55edd4cd431e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/df9858aad69a412dac01addd173bf5f0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1e4068d71d1e4b89b70a7ecdde7be34b] 2024-12-15T04:39:03,495 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 50bb9b1189be419ea786321289b19d6a, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734237536381 2024-12-15T04:39:03,495 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef6226a79b494154866c55edd4cd431e, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734237536381 2024-12-15T04:39:03,495 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c1ec1d4dec4d4fefbb1b67b821e1b08d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1734237537521 2024-12-15T04:39:03,495 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting df9858aad69a412dac01addd173bf5f0, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1734237537521 2024-12-15T04:39:03,495 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e61a19c693e244d6940c5f603d0e3734, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1734237539670 2024-12-15T04:39:03,496 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e4068d71d1e4b89b70a7ecdde7be34b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1734237539670 2024-12-15T04:39:03,502 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#B#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:03,503 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/3ceee31491b1444290731ff8c9492df1 is 50, key is test_row_0/B:col10/1734237541788/Put/seqid=0 2024-12-15T04:39:03,505 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:39:03,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742074_1250 (size=13357) 2024-12-15T04:39:03,508 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241215f977c96bca33479cbf72820a9446ecf8_574a05e47406cea06ff474376a420947 store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:39:03,525 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:03,525 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-12-15T04:39:03,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:03,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:39:03,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-12-15T04:39:03,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-12-15T04:39:03,528 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-12-15T04:39:03,528 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8590 sec 2024-12-15T04:39:03,528 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241215f977c96bca33479cbf72820a9446ecf8_574a05e47406cea06ff474376a420947, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:39:03,528 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215f977c96bca33479cbf72820a9446ecf8_574a05e47406cea06ff474376a420947 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:39:03,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.8620 sec 2024-12-15T04:39:03,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742075_1251 (size=4469) 2024-12-15T04:39:03,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-12-15T04:39:03,775 INFO [Thread-635 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-12-15T04:39:03,923 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/3ceee31491b1444290731ff8c9492df1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/3ceee31491b1444290731ff8c9492df1 2024-12-15T04:39:03,927 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/B of 574a05e47406cea06ff474376a420947 into 3ceee31491b1444290731ff8c9492df1(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:03,927 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:03,927 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/B, priority=13, startTime=1734237543493; duration=0sec 2024-12-15T04:39:03,927 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:03,927 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:B 2024-12-15T04:39:03,927 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:03,928 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:03,928 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 574a05e47406cea06ff474376a420947/C is initiating minor compaction (all files) 2024-12-15T04:39:03,928 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 574a05e47406cea06ff474376a420947/C in TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:03,928 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/bbf6c79ed3954ec99aa3d602c6f8bdad, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a77a260555a94f708346a7700b59c587, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/41726d462d17422ea0529e968ff93e8d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp, totalSize=37.0 K 2024-12-15T04:39:03,929 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting bbf6c79ed3954ec99aa3d602c6f8bdad, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=400, earliestPutTs=1734237536381 2024-12-15T04:39:03,929 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a77a260555a94f708346a7700b59c587, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1734237537521 2024-12-15T04:39:03,929 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 41726d462d17422ea0529e968ff93e8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=441, earliestPutTs=1734237539670 2024-12-15T04:39:03,934 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
574a05e47406cea06ff474376a420947#A#compaction#202 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:03,935 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/fba98e7dd2c54b75a835682975997835 is 175, key is test_row_0/A:col10/1734237541788/Put/seqid=0 2024-12-15T04:39:03,936 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 574a05e47406cea06ff474376a420947#C#compaction#203 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:03,937 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/3ef00e5b69664932ae18df5ba53e024f is 50, key is test_row_0/C:col10/1734237541788/Put/seqid=0 2024-12-15T04:39:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742076_1252 (size=32311) 2024-12-15T04:39:03,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742077_1253 (size=13357) 2024-12-15T04:39:04,352 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/fba98e7dd2c54b75a835682975997835 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/fba98e7dd2c54b75a835682975997835 2024-12-15T04:39:04,352 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/3ef00e5b69664932ae18df5ba53e024f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3ef00e5b69664932ae18df5ba53e024f 2024-12-15T04:39:04,356 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/A of 574a05e47406cea06ff474376a420947 into fba98e7dd2c54b75a835682975997835(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:04,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 574a05e47406cea06ff474376a420947/C of 574a05e47406cea06ff474376a420947 into 3ef00e5b69664932ae18df5ba53e024f(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:04,356 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:04,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:04,356 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/A, priority=13, startTime=1734237543492; duration=0sec 2024-12-15T04:39:04,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947., storeName=574a05e47406cea06ff474376a420947/C, priority=13, startTime=1734237543493; duration=0sec 2024-12-15T04:39:04,356 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:04,356 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:A 2024-12-15T04:39:04,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:04,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 574a05e47406cea06ff474376a420947:C 2024-12-15T04:39:07,167 DEBUG [Thread-633 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78cafade to 127.0.0.1:55935 2024-12-15T04:39:07,167 DEBUG [Thread-633 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:07,209 DEBUG [Thread-631 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:55935 2024-12-15T04:39:07,209 DEBUG [Thread-631 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:07,244 DEBUG [Thread-625 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b4bd1ba to 127.0.0.1:55935 2024-12-15T04:39:07,244 DEBUG [Thread-625 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 101 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 100 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8378 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8190 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3561 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10683 rows 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3523 2024-12-15T04:39:07,244 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10569 rows 2024-12-15T04:39:07,245 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T04:39:07,245 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x407e6b5c to 127.0.0.1:55935 2024-12-15T04:39:07,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:07,247 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-15T04:39:07,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-15T04:39:07,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:07,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-15T04:39:07,253 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237547253"}]},"ts":"1734237547253"} 2024-12-15T04:39:07,254 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-15T04:39:07,279 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-15T04:39:07,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:39:07,281 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, UNASSIGN}] 2024-12-15T04:39:07,282 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=61, ppid=60, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, UNASSIGN 2024-12-15T04:39:07,282 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:07,283 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:39:07,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; CloseRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:39:07,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-15T04:39:07,434 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:07,436 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(124): Close 574a05e47406cea06ff474376a420947 2024-12-15T04:39:07,436 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:39:07,437 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1681): Closing 574a05e47406cea06ff474376a420947, disabling compactions & flushes 2024-12-15T04:39:07,437 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:07,437 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 2024-12-15T04:39:07,437 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. after waiting 0 ms 2024-12-15T04:39:07,437 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:39:07,437 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(2837): Flushing 574a05e47406cea06ff474376a420947 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-12-15T04:39:07,438 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=A 2024-12-15T04:39:07,438 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:07,438 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=B 2024-12-15T04:39:07,439 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:07,439 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 574a05e47406cea06ff474376a420947, store=C 2024-12-15T04:39:07,439 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:07,446 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121546759a71b69a41a1a78a089e71b4d301_574a05e47406cea06ff474376a420947 is 50, key is test_row_0/A:col10/1734237547206/Put/seqid=0 2024-12-15T04:39:07,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742078_1254 (size=9914) 2024-12-15T04:39:07,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-15T04:39:07,852 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-15T04:39:07,862 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121546759a71b69a41a1a78a089e71b4d301_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121546759a71b69a41a1a78a089e71b4d301_574a05e47406cea06ff474376a420947 2024-12-15T04:39:07,863 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/bc7f704998c143f5b76d0f9ba5999c5d, store: [table=TestAcidGuarantees family=A region=574a05e47406cea06ff474376a420947] 2024-12-15T04:39:07,864 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/bc7f704998c143f5b76d0f9ba5999c5d is 175, key is test_row_0/A:col10/1734237547206/Put/seqid=0 2024-12-15T04:39:07,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742079_1255 (size=22561) 2024-12-15T04:39:07,970 DEBUG [master/e56de37b85b3:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 7a581d77bd6cf0246603236a6705aded changed from -1.0 to 0.0, refreshing cache 2024-12-15T04:39:08,271 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=451, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/bc7f704998c143f5b76d0f9ba5999c5d 2024-12-15T04:39:08,281 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/cfbfb85c458e4c03bad513e5d56d5141 is 50, key is test_row_0/B:col10/1734237547206/Put/seqid=0 2024-12-15T04:39:08,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742080_1256 (size=9857) 2024-12-15T04:39:08,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-15T04:39:08,687 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=451 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/cfbfb85c458e4c03bad513e5d56d5141 2024-12-15T04:39:08,698 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/9a0d77312c56473190fff3e47595f4e7 is 50, key is test_row_0/C:col10/1734237547206/Put/seqid=0 2024-12-15T04:39:08,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742081_1257 (size=9857) 2024-12-15T04:39:09,103 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=451 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/9a0d77312c56473190fff3e47595f4e7 2024-12-15T04:39:09,114 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/A/bc7f704998c143f5b76d0f9ba5999c5d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/bc7f704998c143f5b76d0f9ba5999c5d 2024-12-15T04:39:09,119 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/bc7f704998c143f5b76d0f9ba5999c5d, entries=100, sequenceid=451, filesize=22.0 K 2024-12-15T04:39:09,119 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/B/cfbfb85c458e4c03bad513e5d56d5141 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/cfbfb85c458e4c03bad513e5d56d5141 2024-12-15T04:39:09,123 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/cfbfb85c458e4c03bad513e5d56d5141, entries=100, sequenceid=451, filesize=9.6 K 2024-12-15T04:39:09,124 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/.tmp/C/9a0d77312c56473190fff3e47595f4e7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/9a0d77312c56473190fff3e47595f4e7 2024-12-15T04:39:09,128 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/9a0d77312c56473190fff3e47595f4e7, entries=100, sequenceid=451, filesize=9.6 K 2024-12-15T04:39:09,129 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 574a05e47406cea06ff474376a420947 in 1692ms, sequenceid=451, compaction requested=false 2024-12-15T04:39:09,130 DEBUG [StoreCloser-TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/cb02d24f863a4bbda083d9c8c800e42f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b04648c4deef44c4915a8ea9d149cf05, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/edee14bc5c724a38bb3aaf3c52ce4528, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/460a9ac95ada4f2ca870b6c7d2abd39c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9e4df0dfac7e413db6309f755db31bb4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1b9c4a1e8e494e4ba0b8eb87ac493878, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/da9f891bf91344df8868cd32c6e061ef, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6651808ca748443faadfdd099c145eb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/477d70a868d84223884c667ff56ec0ca, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/60adf7ece7544820a852632f0784a0ff, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1dd5f66f6e44404e9d214729816e3c22, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/609e0584076e47d6ae6b02e084ac6411, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/c045702c16c746ceae8cd215077d595d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4e646df080f24e3c94429dfde25dc35c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9c171efca7fa402888c51683be27ebaf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b42c2011032e4147b0233ce575c22dab, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/aeba37bec90c424d89eaf41f4a6df1f8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/a9364609ee4d4bf1bae81a801689c01b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ac398a6b9a9642a6bdc57c7fc38667e2, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/0760db6fb5a340f6b97cdd4cb6d1af6e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b67bea2dcaf2441b8783ad25ab06f031, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6eab12e14b4a415cbd45b1ce06e03789, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/89f6cebc0b4a40459426a0362578a5ee, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/90edc218edc94c01a958b9ad9d50cc02, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/99589292bc464ac485542ed50a56f388, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/de10048c258d4a58b9e56a358c0e4d7c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4fce53f9a55c4d3f862ab62759a5c4ec, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/254a232b18d24ab091083c866a134f9e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ef6226a79b494154866c55edd4cd431e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/df9858aad69a412dac01addd173bf5f0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1e4068d71d1e4b89b70a7ecdde7be34b] to archive 2024-12-15T04:39:09,131 DEBUG [StoreCloser-TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:39:09,134 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/cb02d24f863a4bbda083d9c8c800e42f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/cb02d24f863a4bbda083d9c8c800e42f 2024-12-15T04:39:09,134 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b04648c4deef44c4915a8ea9d149cf05 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b04648c4deef44c4915a8ea9d149cf05 2024-12-15T04:39:09,134 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/edee14bc5c724a38bb3aaf3c52ce4528 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/edee14bc5c724a38bb3aaf3c52ce4528 2024-12-15T04:39:09,134 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/460a9ac95ada4f2ca870b6c7d2abd39c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/460a9ac95ada4f2ca870b6c7d2abd39c 2024-12-15T04:39:09,135 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9e4df0dfac7e413db6309f755db31bb4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9e4df0dfac7e413db6309f755db31bb4 2024-12-15T04:39:09,135 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1b9c4a1e8e494e4ba0b8eb87ac493878 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1b9c4a1e8e494e4ba0b8eb87ac493878 2024-12-15T04:39:09,135 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6651808ca748443faadfdd099c145eb5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6651808ca748443faadfdd099c145eb5 2024-12-15T04:39:09,135 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/da9f891bf91344df8868cd32c6e061ef to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/da9f891bf91344df8868cd32c6e061ef 2024-12-15T04:39:09,136 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/477d70a868d84223884c667ff56ec0ca to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/477d70a868d84223884c667ff56ec0ca 2024-12-15T04:39:09,136 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/60adf7ece7544820a852632f0784a0ff to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/60adf7ece7544820a852632f0784a0ff 2024-12-15T04:39:09,136 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1dd5f66f6e44404e9d214729816e3c22 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1dd5f66f6e44404e9d214729816e3c22 2024-12-15T04:39:09,136 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/609e0584076e47d6ae6b02e084ac6411 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/609e0584076e47d6ae6b02e084ac6411 2024-12-15T04:39:09,137 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b42c2011032e4147b0233ce575c22dab to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b42c2011032e4147b0233ce575c22dab 2024-12-15T04:39:09,137 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4e646df080f24e3c94429dfde25dc35c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4e646df080f24e3c94429dfde25dc35c 2024-12-15T04:39:09,137 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9c171efca7fa402888c51683be27ebaf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/9c171efca7fa402888c51683be27ebaf 2024-12-15T04:39:09,138 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/c045702c16c746ceae8cd215077d595d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/c045702c16c746ceae8cd215077d595d 2024-12-15T04:39:09,138 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/aeba37bec90c424d89eaf41f4a6df1f8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/aeba37bec90c424d89eaf41f4a6df1f8 2024-12-15T04:39:09,138 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/a9364609ee4d4bf1bae81a801689c01b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/a9364609ee4d4bf1bae81a801689c01b 2024-12-15T04:39:09,139 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ac398a6b9a9642a6bdc57c7fc38667e2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ac398a6b9a9642a6bdc57c7fc38667e2 2024-12-15T04:39:09,139 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/0760db6fb5a340f6b97cdd4cb6d1af6e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/0760db6fb5a340f6b97cdd4cb6d1af6e 2024-12-15T04:39:09,141 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b67bea2dcaf2441b8783ad25ab06f031 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/b67bea2dcaf2441b8783ad25ab06f031 2024-12-15T04:39:09,141 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6eab12e14b4a415cbd45b1ce06e03789 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/6eab12e14b4a415cbd45b1ce06e03789 2024-12-15T04:39:09,141 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/de10048c258d4a58b9e56a358c0e4d7c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/de10048c258d4a58b9e56a358c0e4d7c 2024-12-15T04:39:09,141 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/89f6cebc0b4a40459426a0362578a5ee to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/89f6cebc0b4a40459426a0362578a5ee 2024-12-15T04:39:09,141 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/90edc218edc94c01a958b9ad9d50cc02 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/90edc218edc94c01a958b9ad9d50cc02 2024-12-15T04:39:09,142 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/99589292bc464ac485542ed50a56f388 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/99589292bc464ac485542ed50a56f388 2024-12-15T04:39:09,142 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4fce53f9a55c4d3f862ab62759a5c4ec to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/4fce53f9a55c4d3f862ab62759a5c4ec 2024-12-15T04:39:09,143 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/df9858aad69a412dac01addd173bf5f0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/df9858aad69a412dac01addd173bf5f0 2024-12-15T04:39:09,143 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1e4068d71d1e4b89b70a7ecdde7be34b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/1e4068d71d1e4b89b70a7ecdde7be34b 2024-12-15T04:39:09,143 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ef6226a79b494154866c55edd4cd431e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/ef6226a79b494154866c55edd4cd431e 2024-12-15T04:39:09,143 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/254a232b18d24ab091083c866a134f9e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/254a232b18d24ab091083c866a134f9e 2024-12-15T04:39:09,147 DEBUG [StoreCloser-TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/20bd25bbf5544a9a8317b04a1465cc34, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ff316e61fdbd43b7969fa984e46a6148, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a1cc6c9931314587815b35f492aa918a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/30f3f3056a70454d8378ee81d929692d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/122dbb7f65804b3289ca6e49318f81c5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/b93f58627cd94f6cb5c812b6bef90fc9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de75e2ccbea34928b14502e0104ced72, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/4823c7107c974075b882703ee2722207, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ab653a7a5b9e4b59a04db1723c7cc2a1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1c99e43885964cf99f6bee3ced8af7bb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de483aa5c05a44ae93daffb245b6b106, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/064e6a4a17844d84b7eec32744b7c10d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bfa524c333134ebfb511320b1f8ee391, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/791e61b9fa844479997859aea57fc02c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1ff0765b4dc74dc2870422b580482892, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/47549256eda94b7691b3cabb10fc83fe, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a19a8dc1d94f44388b96692b53998f0e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bbaec87a1a0043b0b161e45e8bd84c47, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1d239300ec4b43afacbba82d9c665a2e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/66dc35692206408782f9b0c816349c05, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/006dc74068964cb894984f57b4128d4d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5be3f227128493bb24d92fb9f52c0b2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/6fe22180a0504855947ca720f4af524e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5bb11933694429381af851b556733db, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1b16eef30d8e40adab8ecd60600eec8e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/167aad807fb449748292a3d6b592f5ba, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/d7ca3fbf568c4ce1af77a60ca90af05f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/50bb9b1189be419ea786321289b19d6a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/43d8f0193b10418a95180cd06716931d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c1ec1d4dec4d4fefbb1b67b821e1b08d, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/e61a19c693e244d6940c5f603d0e3734] to archive 2024-12-15T04:39:09,148 DEBUG [StoreCloser-TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-15T04:39:09,150 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/20bd25bbf5544a9a8317b04a1465cc34 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/20bd25bbf5544a9a8317b04a1465cc34 2024-12-15T04:39:09,151 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/b93f58627cd94f6cb5c812b6bef90fc9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/b93f58627cd94f6cb5c812b6bef90fc9 2024-12-15T04:39:09,151 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a1cc6c9931314587815b35f492aa918a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a1cc6c9931314587815b35f492aa918a 2024-12-15T04:39:09,151 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/30f3f3056a70454d8378ee81d929692d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/30f3f3056a70454d8378ee81d929692d 2024-12-15T04:39:09,151 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/4823c7107c974075b882703ee2722207 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/4823c7107c974075b882703ee2722207 2024-12-15T04:39:09,151 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de75e2ccbea34928b14502e0104ced72 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de75e2ccbea34928b14502e0104ced72 2024-12-15T04:39:09,151 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ff316e61fdbd43b7969fa984e46a6148 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ff316e61fdbd43b7969fa984e46a6148 2024-12-15T04:39:09,151 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/122dbb7f65804b3289ca6e49318f81c5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/122dbb7f65804b3289ca6e49318f81c5 2024-12-15T04:39:09,153 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1c99e43885964cf99f6bee3ced8af7bb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1c99e43885964cf99f6bee3ced8af7bb 2024-12-15T04:39:09,153 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ab653a7a5b9e4b59a04db1723c7cc2a1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/ab653a7a5b9e4b59a04db1723c7cc2a1 2024-12-15T04:39:09,153 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/064e6a4a17844d84b7eec32744b7c10d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/064e6a4a17844d84b7eec32744b7c10d 2024-12-15T04:39:09,153 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/791e61b9fa844479997859aea57fc02c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/791e61b9fa844479997859aea57fc02c 2024-12-15T04:39:09,153 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bfa524c333134ebfb511320b1f8ee391 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bfa524c333134ebfb511320b1f8ee391 2024-12-15T04:39:09,153 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1ff0765b4dc74dc2870422b580482892 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1ff0765b4dc74dc2870422b580482892 2024-12-15T04:39:09,154 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de483aa5c05a44ae93daffb245b6b106 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/de483aa5c05a44ae93daffb245b6b106 2024-12-15T04:39:09,154 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/47549256eda94b7691b3cabb10fc83fe to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/47549256eda94b7691b3cabb10fc83fe 2024-12-15T04:39:09,155 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1d239300ec4b43afacbba82d9c665a2e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1d239300ec4b43afacbba82d9c665a2e 2024-12-15T04:39:09,155 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a19a8dc1d94f44388b96692b53998f0e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/a19a8dc1d94f44388b96692b53998f0e 2024-12-15T04:39:09,155 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bbaec87a1a0043b0b161e45e8bd84c47 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/bbaec87a1a0043b0b161e45e8bd84c47 2024-12-15T04:39:09,155 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/66dc35692206408782f9b0c816349c05 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/66dc35692206408782f9b0c816349c05 2024-12-15T04:39:09,156 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/006dc74068964cb894984f57b4128d4d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/006dc74068964cb894984f57b4128d4d 2024-12-15T04:39:09,156 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5be3f227128493bb24d92fb9f52c0b2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5be3f227128493bb24d92fb9f52c0b2 2024-12-15T04:39:09,156 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/6fe22180a0504855947ca720f4af524e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/6fe22180a0504855947ca720f4af524e 2024-12-15T04:39:09,156 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5bb11933694429381af851b556733db to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c5bb11933694429381af851b556733db 2024-12-15T04:39:09,157 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1b16eef30d8e40adab8ecd60600eec8e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/1b16eef30d8e40adab8ecd60600eec8e 2024-12-15T04:39:09,157 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/d7ca3fbf568c4ce1af77a60ca90af05f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/d7ca3fbf568c4ce1af77a60ca90af05f 2024-12-15T04:39:09,157 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/167aad807fb449748292a3d6b592f5ba to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/167aad807fb449748292a3d6b592f5ba 2024-12-15T04:39:09,158 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/43d8f0193b10418a95180cd06716931d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/43d8f0193b10418a95180cd06716931d 2024-12-15T04:39:09,158 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/50bb9b1189be419ea786321289b19d6a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/50bb9b1189be419ea786321289b19d6a 2024-12-15T04:39:09,158 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/e61a19c693e244d6940c5f603d0e3734 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/e61a19c693e244d6940c5f603d0e3734 2024-12-15T04:39:09,158 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c1ec1d4dec4d4fefbb1b67b821e1b08d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/c1ec1d4dec4d4fefbb1b67b821e1b08d 2024-12-15T04:39:09,159 DEBUG [StoreCloser-TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d0c9bf9b018a4336b6bcb3e98d0b1be1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/46f1f2c70ad344aa8c05c16671b7c390, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/b36d2b88d6004924b35599d0c21ff1c2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/61dc7b599c2049429621ef43a4cf4f8a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/16742f2046e04f1a859155fb1408c999, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/62bb6883a73841b898666885138865eb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/54d3e8e98f934fa6946627a4f23d7b1a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2052d82ea40f45f1b546b62a16d50c24, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2feeb7f3a1854f239fc3b2b7e69886c5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d3e2d92de699401a90d18c44b8332db8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/6f3d2ff4bb5c46c6b52536b5fb7df68e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/aa62b59d58b344e487a8c31aa383c004, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7d7dcd96436d4a50b8186f639df9ba8e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3e50d4bbf5724fb38cbcd3eadf1947dc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/8ccdaa0fd1c14116b169b120e8ff2b9d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1fdf7f1ec255427bad313cfb5f566d47, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/63f6d90e50cd464b8d169e3368e55057, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/c2d2c6cd1aa544c8be765c0d91fe66d9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1eea87604a934a358b4f608be12eb36c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7c41a3fe5e814aa3bb71f98762a5f9b9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1570ccfa3f434c9b8fd25eb804d8fadb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/db05a77cd46a41e5b5b1792c1fb734ab, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1172ba09c8cd47f8a70edf4bdfc315d2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/57be76efd37048caa681f21820930721, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/50ada1ac711f4bd3be4f54c82bd1704c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a0dbd19531124ad39e78d1d411d7db89, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/91f109b5b8834ff9ada077780fb49875, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/bbf6c79ed3954ec99aa3d602c6f8bdad, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/44941bffdebb409ba34fa6bdfed01bc8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a77a260555a94f708346a7700b59c587, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/41726d462d17422ea0529e968ff93e8d] to archive 2024-12-15T04:39:09,160 DEBUG [StoreCloser-TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-15T04:39:09,162 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/46f1f2c70ad344aa8c05c16671b7c390 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/46f1f2c70ad344aa8c05c16671b7c390 2024-12-15T04:39:09,162 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/b36d2b88d6004924b35599d0c21ff1c2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/b36d2b88d6004924b35599d0c21ff1c2 2024-12-15T04:39:09,162 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d0c9bf9b018a4336b6bcb3e98d0b1be1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d0c9bf9b018a4336b6bcb3e98d0b1be1 2024-12-15T04:39:09,162 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/54d3e8e98f934fa6946627a4f23d7b1a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/54d3e8e98f934fa6946627a4f23d7b1a 2024-12-15T04:39:09,162 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/16742f2046e04f1a859155fb1408c999 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/16742f2046e04f1a859155fb1408c999 2024-12-15T04:39:09,162 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2052d82ea40f45f1b546b62a16d50c24 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2052d82ea40f45f1b546b62a16d50c24 2024-12-15T04:39:09,163 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/62bb6883a73841b898666885138865eb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/62bb6883a73841b898666885138865eb 2024-12-15T04:39:09,163 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/61dc7b599c2049429621ef43a4cf4f8a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/61dc7b599c2049429621ef43a4cf4f8a 2024-12-15T04:39:09,164 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2feeb7f3a1854f239fc3b2b7e69886c5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/2feeb7f3a1854f239fc3b2b7e69886c5 2024-12-15T04:39:09,164 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/aa62b59d58b344e487a8c31aa383c004 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/aa62b59d58b344e487a8c31aa383c004 2024-12-15T04:39:09,164 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d3e2d92de699401a90d18c44b8332db8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/d3e2d92de699401a90d18c44b8332db8 2024-12-15T04:39:09,164 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3e50d4bbf5724fb38cbcd3eadf1947dc to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3e50d4bbf5724fb38cbcd3eadf1947dc 2024-12-15T04:39:09,165 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/6f3d2ff4bb5c46c6b52536b5fb7df68e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/6f3d2ff4bb5c46c6b52536b5fb7df68e 2024-12-15T04:39:09,165 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/8ccdaa0fd1c14116b169b120e8ff2b9d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/8ccdaa0fd1c14116b169b120e8ff2b9d 2024-12-15T04:39:09,165 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1fdf7f1ec255427bad313cfb5f566d47 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1fdf7f1ec255427bad313cfb5f566d47 2024-12-15T04:39:09,165 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7d7dcd96436d4a50b8186f639df9ba8e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7d7dcd96436d4a50b8186f639df9ba8e 2024-12-15T04:39:09,166 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/c2d2c6cd1aa544c8be765c0d91fe66d9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/c2d2c6cd1aa544c8be765c0d91fe66d9 2024-12-15T04:39:09,166 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/63f6d90e50cd464b8d169e3368e55057 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/63f6d90e50cd464b8d169e3368e55057 2024-12-15T04:39:09,166 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1eea87604a934a358b4f608be12eb36c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1eea87604a934a358b4f608be12eb36c 2024-12-15T04:39:09,166 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7c41a3fe5e814aa3bb71f98762a5f9b9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/7c41a3fe5e814aa3bb71f98762a5f9b9 2024-12-15T04:39:09,167 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1172ba09c8cd47f8a70edf4bdfc315d2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1172ba09c8cd47f8a70edf4bdfc315d2 2024-12-15T04:39:09,167 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1570ccfa3f434c9b8fd25eb804d8fadb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/1570ccfa3f434c9b8fd25eb804d8fadb 2024-12-15T04:39:09,167 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/db05a77cd46a41e5b5b1792c1fb734ab to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/db05a77cd46a41e5b5b1792c1fb734ab 2024-12-15T04:39:09,167 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a0dbd19531124ad39e78d1d411d7db89 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a0dbd19531124ad39e78d1d411d7db89 2024-12-15T04:39:09,168 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/91f109b5b8834ff9ada077780fb49875 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/91f109b5b8834ff9ada077780fb49875 2024-12-15T04:39:09,168 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/50ada1ac711f4bd3be4f54c82bd1704c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/50ada1ac711f4bd3be4f54c82bd1704c 2024-12-15T04:39:09,168 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/bbf6c79ed3954ec99aa3d602c6f8bdad to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/bbf6c79ed3954ec99aa3d602c6f8bdad 2024-12-15T04:39:09,168 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/57be76efd37048caa681f21820930721 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/57be76efd37048caa681f21820930721 2024-12-15T04:39:09,168 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/44941bffdebb409ba34fa6bdfed01bc8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/44941bffdebb409ba34fa6bdfed01bc8 2024-12-15T04:39:09,168 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/41726d462d17422ea0529e968ff93e8d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/41726d462d17422ea0529e968ff93e8d 2024-12-15T04:39:09,168 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a77a260555a94f708346a7700b59c587 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/a77a260555a94f708346a7700b59c587 2024-12-15T04:39:09,172 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/recovered.edits/454.seqid, newMaxSeqId=454, maxSeqId=4 2024-12-15T04:39:09,173 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947. 
2024-12-15T04:39:09,173 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] regionserver.HRegion(1635): Region close journal for 574a05e47406cea06ff474376a420947: 2024-12-15T04:39:09,174 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=62}] handler.UnassignRegionHandler(170): Closed 574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,175 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=61 updating hbase:meta row=574a05e47406cea06ff474376a420947, regionState=CLOSED 2024-12-15T04:39:09,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-12-15T04:39:09,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseRegionProcedure 574a05e47406cea06ff474376a420947, server=e56de37b85b3,43199,1734237482035 in 1.8920 sec 2024-12-15T04:39:09,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=60 2024-12-15T04:39:09,177 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=60, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=574a05e47406cea06ff474376a420947, UNASSIGN in 1.8950 sec 2024-12-15T04:39:09,178 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-15T04:39:09,178 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8970 sec 2024-12-15T04:39:09,179 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237549179"}]},"ts":"1734237549179"} 2024-12-15T04:39:09,180 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-15T04:39:09,213 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-15T04:39:09,215 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9660 sec 2024-12-15T04:39:09,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-12-15T04:39:09,361 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-12-15T04:39:09,362 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-15T04:39:09,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:09,364 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=63, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:09,365 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=63, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:09,365 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-15T04:39:09,368 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,373 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/recovered.edits] 2024-12-15T04:39:09,380 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/bc7f704998c143f5b76d0f9ba5999c5d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/bc7f704998c143f5b76d0f9ba5999c5d 2024-12-15T04:39:09,381 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/fba98e7dd2c54b75a835682975997835 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/A/fba98e7dd2c54b75a835682975997835 2024-12-15T04:39:09,386 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/3ceee31491b1444290731ff8c9492df1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/3ceee31491b1444290731ff8c9492df1 2024-12-15T04:39:09,386 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/cfbfb85c458e4c03bad513e5d56d5141 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/B/cfbfb85c458e4c03bad513e5d56d5141 2024-12-15T04:39:09,391 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3ef00e5b69664932ae18df5ba53e024f to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/3ef00e5b69664932ae18df5ba53e024f 2024-12-15T04:39:09,391 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/9a0d77312c56473190fff3e47595f4e7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/C/9a0d77312c56473190fff3e47595f4e7 2024-12-15T04:39:09,396 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/recovered.edits/454.seqid to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947/recovered.edits/454.seqid 2024-12-15T04:39:09,397 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,397 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-15T04:39:09,397 DEBUG [PEWorker-1 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-15T04:39:09,399 DEBUG [PEWorker-1 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-15T04:39:09,414 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121505f04d9eafba459aa596ab54a269feee_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121505f04d9eafba459aa596ab54a269feee_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,414 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121500b3455322c64f9e9c82b9e5ce0b0b02_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121500b3455322c64f9e9c82b9e5ce0b0b02_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,414 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121514a6c42bb1b64e44aa76f9552a456cd1_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121514a6c42bb1b64e44aa76f9552a456cd1_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,414 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412151cbc271c46a64f3c849adf475b6c3271_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412151cbc271c46a64f3c849adf475b6c3271_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,414 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412152951ce2c339d46a69a37271a269c3140_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412152951ce2c339d46a69a37271a269c3140_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,414 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412153431b7768fcd4001b3476dc69bb3c85d_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412153431b7768fcd4001b3476dc69bb3c85d_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,415 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121546759a71b69a41a1a78a089e71b4d301_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121546759a71b69a41a1a78a089e71b4d301_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,415 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412154aa560be307248df93b8a1d112df94c6_574a05e47406cea06ff474376a420947 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412154aa560be307248df93b8a1d112df94c6_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,417 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121561ef916e0d484a80a7470021cf0524c8_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121561ef916e0d484a80a7470021cf0524c8_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,417 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215675a803585eb4be29006539511aad549_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215675a803585eb4be29006539511aad549_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,417 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155fdcc2f891d44e55bf413b0c843469af_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155fdcc2f891d44e55bf413b0c843469af_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,417 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121588a0a3bc3d3a4b9ca712516add04b871_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121588a0a3bc3d3a4b9ca712516add04b871_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,417 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412156a24637f470542e9a71fe133d92af579_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412156a24637f470542e9a71fe133d92af579_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,417 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215965149fec176478aa9386e0f7478d069_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215965149fec176478aa9386e0f7478d069_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,418 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159bf2393c545040128e3fedc1c818519f_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159bf2393c545040128e3fedc1c818519f_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,418 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159cd634d2dd3a44d6b7060f56cc242783_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159cd634d2dd3a44d6b7060f56cc242783_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,419 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a8168e5eb5274a69a43f7e40f05c48db_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a8168e5eb5274a69a43f7e40f05c48db_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,419 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b03ba245080a43468377add7a6e79ef6_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b03ba245080a43468377add7a6e79ef6_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,419 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b4a9dd405546408f8754a65b8e32b506_574a05e47406cea06ff474376a420947 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b4a9dd405546408f8754a65b8e32b506_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,419 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e86e2ca3820c460fb99363b26dae8779_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e86e2ca3820c460fb99363b26dae8779_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,419 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215ef9619968eeb43f490ec971f0fe280c1_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215ef9619968eeb43f490ec971f0fe280c1_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,419 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215f3764152c57d4b26b9639856cb929005_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215f3764152c57d4b26b9639856cb929005_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,419 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215c054404cc07e4f118f76233e5b7c02fd_574a05e47406cea06ff474376a420947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215c054404cc07e4f118f76233e5b7c02fd_574a05e47406cea06ff474376a420947 2024-12-15T04:39:09,420 DEBUG [PEWorker-1 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-15T04:39:09,422 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=63, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:09,424 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-15T04:39:09,426 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-15T04:39:09,427 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=63, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:09,427 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-15T04:39:09,427 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734237549427"}]},"ts":"9223372036854775807"} 2024-12-15T04:39:09,429 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-15T04:39:09,429 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 574a05e47406cea06ff474376a420947, NAME => 'TestAcidGuarantees,,1734237518351.574a05e47406cea06ff474376a420947.', STARTKEY => '', ENDKEY => ''}] 2024-12-15T04:39:09,429 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-15T04:39:09,429 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734237549429"}]},"ts":"9223372036854775807"} 2024-12-15T04:39:09,430 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-15T04:39:09,438 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=63, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:09,440 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 76 msec 2024-12-15T04:39:09,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-15T04:39:09,467 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-12-15T04:39:09,482 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobMixedAtomicity Thread=244 (was 245), OpenFileDescriptor=451 (was 459), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=326 (was 249) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4543 (was 4677) 2024-12-15T04:39:09,491 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=244, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=326, ProcessCount=11, AvailableMemoryMB=4543 2024-12-15T04:39:09,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
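
The "Operation: DISABLE ... completed" and "Operation: DELETE ... completed" lines above are the client side of the DisableTableProcedure (pid=59) and DeleteTableProcedure (pid=63) the master ran. The test harness code itself is not part of this log; the following is a minimal client-side sketch of those two calls, assuming a localhost quorum on the port the clients in this run connect to (127.0.0.1:55935).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumed quorum/port for illustration; the log shows clients connecting to 127.0.0.1:55935.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "55935");
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Each call blocks until the master procedure finishes, which is what the
                // repeated "Checking to see if procedure is done" lines above reflect.
                if (admin.tableExists(table)) {
                    admin.disableTable(table);   // DisableTableProcedure (pid=59 above)
                    admin.deleteTable(table);    // DeleteTableProcedure (pid=63 above)
                }
            }
        }
    }
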
2024-12-15T04:39:09,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:39:09,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:09,495 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T04:39:09,495 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:09,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 64 2024-12-15T04:39:09,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-15T04:39:09,496 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T04:39:09,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742082_1258 (size=963) 2024-12-15T04:39:09,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-15T04:39:09,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-15T04:39:09,908 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:39:09,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742083_1259 (size=53) 2024-12-15T04:39:10,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-15T04:39:10,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:39:10,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 35d053ddd9bda1d702f0fa4863288c02, disabling compactions & flushes 2024-12-15T04:39:10,321 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:10,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:10,321 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. after waiting 0 ms 2024-12-15T04:39:10,322 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:10,322 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
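
The descriptor printed for pid=64 above declares three column families A, B and C with VERSIONS => '1' and the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', which is why the stores later open with a CompactingMemStore and an ADAPTIVE compactor. Below is a sketch of building an equivalent descriptor through the client API, reusing an Admin handle obtained as in the earlier sketch; only the attributes visible in the log are set, everything else is left at its default.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        // Builds a descriptor matching what the create log above prints: families A/B/C
        // with VERSIONS=1, 64 KB blocks, and the table-level ADAPTIVE memstore attribute.
        static void createTestTable(Admin admin) throws IOException {
            TableDescriptorBuilder table =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
            for (String family : new String[] {"A", "B", "C"}) {
                ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)      // VERSIONS => '1'
                    .setBlocksize(65536)    // BLOCKSIZE => '65536 B (64KB)'
                    .build();
                table.setColumnFamily(cf);
            }
            admin.createTable(table.build());   // CreateTableProcedure (pid=64 above)
        }
    }
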
2024-12-15T04:39:10,322 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:10,324 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T04:39:10,325 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734237550324"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734237550324"}]},"ts":"1734237550324"} 2024-12-15T04:39:10,328 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T04:39:10,329 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T04:39:10,329 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237550329"}]},"ts":"1734237550329"} 2024-12-15T04:39:10,331 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-15T04:39:10,372 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=35d053ddd9bda1d702f0fa4863288c02, ASSIGN}] 2024-12-15T04:39:10,374 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=35d053ddd9bda1d702f0fa4863288c02, ASSIGN 2024-12-15T04:39:10,375 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=35d053ddd9bda1d702f0fa4863288c02, ASSIGN; state=OFFLINE, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=false 2024-12-15T04:39:10,526 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=35d053ddd9bda1d702f0fa4863288c02, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:10,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; OpenRegionProcedure 35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:39:10,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-15T04:39:10,680 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:10,687 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:10,688 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:39:10,689 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,689 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:39:10,689 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,690 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,691 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,692 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:10,692 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 35d053ddd9bda1d702f0fa4863288c02 columnFamilyName A 2024-12-15T04:39:10,692 DEBUG [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:10,693 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.HStore(327): Store=35d053ddd9bda1d702f0fa4863288c02/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:10,693 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,694 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:10,694 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 35d053ddd9bda1d702f0fa4863288c02 columnFamilyName B 2024-12-15T04:39:10,694 DEBUG [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:10,694 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.HStore(327): Store=35d053ddd9bda1d702f0fa4863288c02/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:10,694 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,695 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:10,695 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 35d053ddd9bda1d702f0fa4863288c02 columnFamilyName C 2024-12-15T04:39:10,695 DEBUG [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:10,696 INFO [StoreOpener-35d053ddd9bda1d702f0fa4863288c02-1 {}] regionserver.HStore(327): Store=35d053ddd9bda1d702f0fa4863288c02/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:10,696 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:10,697 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,697 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,698 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:39:10,699 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:10,701 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:39:10,701 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened 35d053ddd9bda1d702f0fa4863288c02; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67160299, jitterRate=7.664412260055542E-4}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:39:10,702 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:10,703 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., pid=66, masterSystemTime=1734237550680 2024-12-15T04:39:10,704 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:10,704 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:10,704 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=35d053ddd9bda1d702f0fa4863288c02, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:10,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-12-15T04:39:10,707 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; OpenRegionProcedure 35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 in 178 msec 2024-12-15T04:39:10,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-12-15T04:39:10,708 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=35d053ddd9bda1d702f0fa4863288c02, ASSIGN in 335 msec 2024-12-15T04:39:10,709 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T04:39:10,709 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237550709"}]},"ts":"1734237550709"} 2024-12-15T04:39:10,710 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-15T04:39:10,720 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=64, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T04:39:10,721 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2270 sec 2024-12-15T04:39:11,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=64 2024-12-15T04:39:11,606 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 64 completed 2024-12-15T04:39:11,610 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-12-15T04:39:11,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,660 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,663 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,666 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T04:39:11,667 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45510, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T04:39:11,670 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-12-15T04:39:11,768 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,772 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-12-15T04:39:11,881 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,882 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-12-15T04:39:11,895 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,898 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66e575aa to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a0e9c8f 2024-12-15T04:39:11,906 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36642cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,908 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x131ceb8f to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d68f787 2024-12-15T04:39:11,920 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c299cfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,923 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-12-15T04:39:11,931 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,934 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1730a60f to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3677bd4f 2024-12-15T04:39:11,946 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf0ba59, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,948 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x598cfed4 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@521aad6f 2024-12-15T04:39:11,955 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c86f707, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,957 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x68ad882f to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6f5b2180 2024-12-15T04:39:11,963 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34becda3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,964 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-12-15T04:39:11,972 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:11,976 DEBUG [hconnection-0x1ee769e6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,976 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:11,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees 2024-12-15T04:39:11,977 DEBUG [hconnection-0x4a6419b6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,977 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49464, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
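
The ReadOnlyZKClient/AbstractRpcClient lines above are the test opening one client connection per writer and reader thread against the quorum at 127.0.0.1:55935. The test being set up here (testGetAtomicity, per the ResourceChecker line earlier) checks that concurrent readers never see a partially applied row, so the relevant write is a single Put spanning families A, B and C, which HBase applies to the row atomically. The exact row and qualifier scheme the test uses is not visible in this log, so the names below are illustrative only.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AtomicWriteSketch {
        // One Put that touches all three families; the region applies it atomically,
        // which is the property the concurrent readers in this test verify.
        static void writeRow(Connection conn, byte[] row, byte[] value) throws IOException {
            try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(row);
                for (String family : new String[] {"A", "B", "C"}) {
                    put.addColumn(Bytes.toBytes(family), Bytes.toBytes("q"), value);  // "q" is a placeholder qualifier
                }
                table.put(put);
            }
        }
    }
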
2024-12-15T04:39:11,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-15T04:39:11,977 DEBUG [hconnection-0xf8d7e09-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,978 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:11,978 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,978 DEBUG [hconnection-0x4c6c63e2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,978 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49478, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,978 DEBUG [hconnection-0x786f1d2b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,978 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=67, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:11,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:11,979 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,979 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49500, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,980 DEBUG [hconnection-0x2abbb9f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,980 DEBUG [hconnection-0x2801c8db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,981 DEBUG [hconnection-0x3c450ac1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,981 DEBUG [hconnection-0x1c6bc362-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,981 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,982 DEBUG [hconnection-0x42dc623c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:11,982 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49528, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
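
Alongside the writer connections, the client asks the master to flush the table ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), which the master runs as FlushTableProcedure pid=67 with a FlushRegionProcedure subprocedure. The client-side call is a one-liner; a sketch, again reusing an Admin handle as in the earlier sketches:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    public class FlushSketch {
        // Asks the master to flush every region of the table; in this log the request
        // shows up as FlushTableProcedure pid=67 and a FlushRegionProcedure child.
        static void flushTable(Admin admin) throws IOException {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }

The RegionTooBusyException lines that follow are the region server rejecting further writes because the region's memstore is over the 512.0 K limit reported in those messages while the flush to disk is still in progress.
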
2024-12-15T04:39:11,982 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49540, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,982 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49546, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,983 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:11,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:11,988 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:39:11,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:11,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:11,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:11,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:11,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:11,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:12,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237612011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237612013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,017 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237612013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ba7c9248e681436780f1735545c7d41e is 50, key is test_row_0/A:col10/1734237551987/Put/seqid=0 2024-12-15T04:39:12,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237612014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237612014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742084_1260 (size=12001) 2024-12-15T04:39:12,067 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ba7c9248e681436780f1735545c7d41e 2024-12-15T04:39:12,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-15T04:39:12,095 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/d8f26fb5f1e04e7bafc07b330eeb2dd6 is 50, key is test_row_0/B:col10/1734237551987/Put/seqid=0 2024-12-15T04:39:12,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742085_1261 (size=12001) 2024-12-15T04:39:12,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237612117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237612118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237612118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237612119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237612119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,130 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-15T04:39:12,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:12,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:12,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:12,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,283 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-15T04:39:12,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:12,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:12,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:12,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-15T04:39:12,320 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237612320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,321 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237612320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237612321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237612321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,322 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237612322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,435 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-15T04:39:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:12,436 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] handler.RSProcedureHandler(58): pid=68 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=68 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=68 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:12,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/d8f26fb5f1e04e7bafc07b330eeb2dd6 2024-12-15T04:39:12,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0c617e09cf6543c894dd61f5bba30b30 is 50, key is test_row_0/C:col10/1734237551987/Put/seqid=0 2024-12-15T04:39:12,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742086_1262 (size=12001) 2024-12-15T04:39:12,532 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0c617e09cf6543c894dd61f5bba30b30 2024-12-15T04:39:12,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ba7c9248e681436780f1735545c7d41e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ba7c9248e681436780f1735545c7d41e 2024-12-15T04:39:12,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ba7c9248e681436780f1735545c7d41e, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:39:12,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/d8f26fb5f1e04e7bafc07b330eeb2dd6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d8f26fb5f1e04e7bafc07b330eeb2dd6 2024-12-15T04:39:12,544 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d8f26fb5f1e04e7bafc07b330eeb2dd6, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:39:12,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0c617e09cf6543c894dd61f5bba30b30 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0c617e09cf6543c894dd61f5bba30b30 2024-12-15T04:39:12,548 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0c617e09cf6543c894dd61f5bba30b30, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:39:12,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 35d053ddd9bda1d702f0fa4863288c02 in 561ms, sequenceid=14, compaction requested=false 2024-12-15T04:39:12,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:12,587 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=68 2024-12-15T04:39:12,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:12,588 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:39:12,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:12,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:12,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-15T04:39:12,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/1d58181acfb949d696c1d0cb13b269d1 is 50, key is test_row_0/A:col10/1734237552013/Put/seqid=0 2024-12-15T04:39:12,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742087_1263 (size=12001) 2024-12-15T04:39:12,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:12,625 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:12,632 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237612629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237612630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237612629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237612629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237612631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237612734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237612734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237612736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237612736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237612744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237612937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237612937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237612937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,939 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237612937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:12,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237612945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:12,999 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/1d58181acfb949d696c1d0cb13b269d1 2024-12-15T04:39:13,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/302c50c86e774b26a050cb17816979fe is 50, key is test_row_0/B:col10/1734237552013/Put/seqid=0 2024-12-15T04:39:13,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742088_1264 (size=12001) 2024-12-15T04:39:13,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-15T04:39:13,240 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237613239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,241 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237613239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,241 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237613240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237613241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237613249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,410 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/302c50c86e774b26a050cb17816979fe 2024-12-15T04:39:13,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/39c124cff36a4c5b9a1f026cff8efdb6 is 50, key is test_row_0/C:col10/1734237552013/Put/seqid=0 2024-12-15T04:39:13,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742089_1265 (size=12001) 2024-12-15T04:39:13,437 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/39c124cff36a4c5b9a1f026cff8efdb6 2024-12-15T04:39:13,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/1d58181acfb949d696c1d0cb13b269d1 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1d58181acfb949d696c1d0cb13b269d1 2024-12-15T04:39:13,444 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1d58181acfb949d696c1d0cb13b269d1, entries=150, sequenceid=37, filesize=11.7 K 2024-12-15T04:39:13,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/302c50c86e774b26a050cb17816979fe as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/302c50c86e774b26a050cb17816979fe 2024-12-15T04:39:13,449 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/302c50c86e774b26a050cb17816979fe, entries=150, sequenceid=37, filesize=11.7 K 2024-12-15T04:39:13,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/39c124cff36a4c5b9a1f026cff8efdb6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/39c124cff36a4c5b9a1f026cff8efdb6 2024-12-15T04:39:13,455 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/39c124cff36a4c5b9a1f026cff8efdb6, entries=150, sequenceid=37, filesize=11.7 K 2024-12-15T04:39:13,456 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 35d053ddd9bda1d702f0fa4863288c02 in 868ms, sequenceid=37, compaction requested=false 2024-12-15T04:39:13,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:13,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:13,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=68}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=68 2024-12-15T04:39:13,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=68 2024-12-15T04:39:13,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-12-15T04:39:13,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4790 sec 2024-12-15T04:39:13,460 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=67, table=TestAcidGuarantees in 1.4830 sec 2024-12-15T04:39:13,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:13,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:39:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:13,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:13,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:13,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:13,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:13,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:13,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/0513b2f6c7964a7ea13637639d268e73 is 50, key is test_row_0/A:col10/1734237552628/Put/seqid=0 2024-12-15T04:39:13,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742090_1266 (size=12001) 2024-12-15T04:39:13,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237613761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237613762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237613763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237613763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237613764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237613864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237613866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,867 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237613866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237613866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:13,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:13,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237613867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237614066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237614069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,071 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237614069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237614069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,071 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237614069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-12-15T04:39:14,091 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-12-15T04:39:14,091 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:14,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-12-15T04:39:14,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-15T04:39:14,093 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:14,093 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:14,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:14,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 
(bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/0513b2f6c7964a7ea13637639d268e73 2024-12-15T04:39:14,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8d8dcb296b4349d2af6de3002e57d19d is 50, key is test_row_0/B:col10/1734237552628/Put/seqid=0 2024-12-15T04:39:14,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742091_1267 (size=12001) 2024-12-15T04:39:14,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-15T04:39:14,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-15T04:39:14,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:14,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:14,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237614371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237614372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237614373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237614374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237614374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-15T04:39:14,396 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-15T04:39:14,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:14,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:14,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,549 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-15T04:39:14,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:14,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,550 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:14,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8d8dcb296b4349d2af6de3002e57d19d 2024-12-15T04:39:14,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/52bb44e3047b404b99b37bd31c9c0609 is 50, key is test_row_0/C:col10/1734237552628/Put/seqid=0 2024-12-15T04:39:14,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742092_1268 (size=12001) 2024-12-15T04:39:14,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-15T04:39:14,696 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-15T04:39:14,701 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,702 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-15T04:39:14,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:14,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,702 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:14,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,854 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-15T04:39:14,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:14,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:14,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237614875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237614875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,878 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237614876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237614876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:14,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237614877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:14,972 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/52bb44e3047b404b99b37bd31c9c0609 2024-12-15T04:39:14,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/0513b2f6c7964a7ea13637639d268e73 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0513b2f6c7964a7ea13637639d268e73 2024-12-15T04:39:14,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0513b2f6c7964a7ea13637639d268e73, entries=150, sequenceid=52, filesize=11.7 K 2024-12-15T04:39:14,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8d8dcb296b4349d2af6de3002e57d19d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8d8dcb296b4349d2af6de3002e57d19d 2024-12-15T04:39:14,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8d8dcb296b4349d2af6de3002e57d19d, entries=150, sequenceid=52, filesize=11.7 K 2024-12-15T04:39:14,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/52bb44e3047b404b99b37bd31c9c0609 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/52bb44e3047b404b99b37bd31c9c0609 2024-12-15T04:39:14,992 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/52bb44e3047b404b99b37bd31c9c0609, entries=150, sequenceid=52, filesize=11.7 K 2024-12-15T04:39:14,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 35d053ddd9bda1d702f0fa4863288c02 in 1248ms, sequenceid=52, compaction requested=true 2024-12-15T04:39:14,993 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:14,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:14,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:14,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:14,994 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:14,995 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:14,995 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor compaction (all files) 2024-12-15T04:39:14,995 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:14,995 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ba7c9248e681436780f1735545c7d41e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1d58181acfb949d696c1d0cb13b269d1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0513b2f6c7964a7ea13637639d268e73] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.2 K 2024-12-15T04:39:14,995 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:14,996 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor compaction (all files) 2024-12-15T04:39:14,996 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:14,996 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d8f26fb5f1e04e7bafc07b330eeb2dd6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/302c50c86e774b26a050cb17816979fe, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8d8dcb296b4349d2af6de3002e57d19d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.2 K 2024-12-15T04:39:14,996 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba7c9248e681436780f1735545c7d41e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237551984 2024-12-15T04:39:14,996 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d58181acfb949d696c1d0cb13b269d1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734237552010 2024-12-15T04:39:14,996 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d8f26fb5f1e04e7bafc07b330eeb2dd6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237551984 2024-12-15T04:39:14,997 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 302c50c86e774b26a050cb17816979fe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734237552010 2024-12-15T04:39:14,997 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 0513b2f6c7964a7ea13637639d268e73, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237552628 2024-12-15T04:39:14,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:14,997 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d8dcb296b4349d2af6de3002e57d19d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237552628 2024-12-15T04:39:14,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:14,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:14,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:15,007 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:15,007 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:15,008 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#217 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:15,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-12-15T04:39:15,008 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/2ad5322f9e2642468fc57e207f24ea78 is 50, key is test_row_0/B:col10/1734237552628/Put/seqid=0 2024-12-15T04:39:15,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:15,008 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-15T04:39:15,008 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/b1fe62e2822b469ca648599e5e4b5ef4 is 50, key is test_row_0/A:col10/1734237552628/Put/seqid=0 2024-12-15T04:39:15,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:15,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:15,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:15,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:15,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:15,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:15,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/789b4514c66a472d838c84f451bf32b8 is 50, key is test_row_0/A:col10/1734237553762/Put/seqid=0 2024-12-15T04:39:15,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742094_1270 (size=12104) 2024-12-15T04:39:15,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742093_1269 (size=12104) 2024-12-15T04:39:15,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742095_1271 (size=12001) 2024-12-15T04:39:15,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-15T04:39:15,430 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/b1fe62e2822b469ca648599e5e4b5ef4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b1fe62e2822b469ca648599e5e4b5ef4 
2024-12-15T04:39:15,433 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/789b4514c66a472d838c84f451bf32b8 2024-12-15T04:39:15,434 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into b1fe62e2822b469ca648599e5e4b5ef4(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:15,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:15,434 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=13, startTime=1734237554993; duration=0sec 2024-12-15T04:39:15,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:15,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:15,434 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:15,435 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:15,435 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:15,435 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:15,435 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0c617e09cf6543c894dd61f5bba30b30, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/39c124cff36a4c5b9a1f026cff8efdb6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/52bb44e3047b404b99b37bd31c9c0609] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.2 K 2024-12-15T04:39:15,436 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c617e09cf6543c894dd61f5bba30b30, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237551984 2024-12-15T04:39:15,436 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39c124cff36a4c5b9a1f026cff8efdb6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1734237552010 2024-12-15T04:39:15,436 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52bb44e3047b404b99b37bd31c9c0609, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237552628 2024-12-15T04:39:15,440 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/2ad5322f9e2642468fc57e207f24ea78 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/2ad5322f9e2642468fc57e207f24ea78 2024-12-15T04:39:15,445 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into 2ad5322f9e2642468fc57e207f24ea78(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:15,445 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:15,445 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=13, startTime=1734237554994; duration=0sec 2024-12-15T04:39:15,445 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:15,445 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:15,453 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#C#compaction#219 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:15,454 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/275eaa3f5ca04c5f86dcc9b394025424 is 50, key is test_row_0/C:col10/1734237552628/Put/seqid=0 2024-12-15T04:39:15,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/55108c236a864db2b5352a8b87bf6cbf is 50, key is test_row_0/B:col10/1734237553762/Put/seqid=0 2024-12-15T04:39:15,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742096_1272 (size=12104) 2024-12-15T04:39:15,463 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/275eaa3f5ca04c5f86dcc9b394025424 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/275eaa3f5ca04c5f86dcc9b394025424 2024-12-15T04:39:15,467 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into 275eaa3f5ca04c5f86dcc9b394025424(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:15,467 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:15,467 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=13, startTime=1734237554999; duration=0sec 2024-12-15T04:39:15,467 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:15,467 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:15,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742097_1273 (size=12001) 2024-12-15T04:39:15,869 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/55108c236a864db2b5352a8b87bf6cbf 2024-12-15T04:39:15,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/a92449404cf74a7fb694f7d0a24c7ee4 is 50, key is test_row_0/C:col10/1734237553762/Put/seqid=0 2024-12-15T04:39:15,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:15,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:15,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742098_1274 (size=12001) 2024-12-15T04:39:15,889 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/a92449404cf74a7fb694f7d0a24c7ee4 2024-12-15T04:39:15,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:15,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237615887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:15,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237615889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:15,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237615889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:15,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237615889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:15,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:15,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237615890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:15,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/789b4514c66a472d838c84f451bf32b8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/789b4514c66a472d838c84f451bf32b8 2024-12-15T04:39:15,896 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/789b4514c66a472d838c84f451bf32b8, entries=150, sequenceid=74, filesize=11.7 K 2024-12-15T04:39:15,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/55108c236a864db2b5352a8b87bf6cbf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/55108c236a864db2b5352a8b87bf6cbf 2024-12-15T04:39:15,900 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/55108c236a864db2b5352a8b87bf6cbf, entries=150, sequenceid=74, filesize=11.7 K 2024-12-15T04:39:15,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/a92449404cf74a7fb694f7d0a24c7ee4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/a92449404cf74a7fb694f7d0a24c7ee4 2024-12-15T04:39:15,904 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/a92449404cf74a7fb694f7d0a24c7ee4, entries=150, sequenceid=74, filesize=11.7 K 2024-12-15T04:39:15,905 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 35d053ddd9bda1d702f0fa4863288c02 in 897ms, sequenceid=74, compaction requested=false 2024-12-15T04:39:15,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:15,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:15,906 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-15T04:39:15,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-15T04:39:15,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-12-15T04:39:15,908 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8130 sec 2024-12-15T04:39:15,909 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.8170 sec 2024-12-15T04:39:15,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:15,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:39:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:15,993 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:15,994 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:16,001 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f67d91098c1a4154a030a119c7bb3006 is 50, key is test_row_0/A:col10/1734237555889/Put/seqid=0 2024-12-15T04:39:16,004 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742099_1275 (size=12001) 2024-12-15T04:39:16,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f67d91098c1a4154a030a119c7bb3006 2024-12-15T04:39:16,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/1abfa815e55548d988a14918da2908d9 is 50, key is test_row_0/B:col10/1734237555889/Put/seqid=0 2024-12-15T04:39:16,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237616010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237616010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,015 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237616012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742100_1276 (size=12001) 2024-12-15T04:39:16,018 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237616015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237616015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237616116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237616116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,117 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237616116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237616119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,121 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237616119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-12-15T04:39:16,196 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-12-15T04:39:16,197 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:16,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-12-15T04:39:16,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T04:39:16,198 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:16,198 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:16,199 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:16,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T04:39:16,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237616317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237616317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237616317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237616323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237616323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,350 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-15T04:39:16,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:16,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:16,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,417 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/1abfa815e55548d988a14918da2908d9 2024-12-15T04:39:16,424 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/13cd456cd240479f94cb1691dc91b5ed is 50, key is test_row_0/C:col10/1734237555889/Put/seqid=0 2024-12-15T04:39:16,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742101_1277 (size=12001) 2024-12-15T04:39:16,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T04:39:16,502 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,502 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-15T04:39:16,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:16,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,503 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,503 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237616621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237616621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237616622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,627 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237616625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:16,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237616626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,654 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,655 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-15T04:39:16,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:16,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:16,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T04:39:16,807 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,807 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-15T04:39:16,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:16,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,807 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:16,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:16,828 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/13cd456cd240479f94cb1691dc91b5ed 2024-12-15T04:39:16,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f67d91098c1a4154a030a119c7bb3006 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f67d91098c1a4154a030a119c7bb3006 2024-12-15T04:39:16,835 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f67d91098c1a4154a030a119c7bb3006, entries=150, sequenceid=92, filesize=11.7 K 2024-12-15T04:39:16,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/1abfa815e55548d988a14918da2908d9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/1abfa815e55548d988a14918da2908d9 2024-12-15T04:39:16,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/1abfa815e55548d988a14918da2908d9, entries=150, sequenceid=92, filesize=11.7 K 2024-12-15T04:39:16,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/13cd456cd240479f94cb1691dc91b5ed as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/13cd456cd240479f94cb1691dc91b5ed 2024-12-15T04:39:16,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/13cd456cd240479f94cb1691dc91b5ed, entries=150, sequenceid=92, filesize=11.7 K 2024-12-15T04:39:16,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 35d053ddd9bda1d702f0fa4863288c02 in 859ms, sequenceid=92, compaction requested=true 2024-12-15T04:39:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:16,852 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:16,852 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:16,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:16,853 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:16,853 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:16,853 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor compaction (all files) 2024-12-15T04:39:16,853 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor compaction (all files) 2024-12-15T04:39:16,853 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,853 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:16,853 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/2ad5322f9e2642468fc57e207f24ea78, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/55108c236a864db2b5352a8b87bf6cbf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/1abfa815e55548d988a14918da2908d9] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.3 K 2024-12-15T04:39:16,853 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b1fe62e2822b469ca648599e5e4b5ef4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/789b4514c66a472d838c84f451bf32b8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f67d91098c1a4154a030a119c7bb3006] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.3 K 2024-12-15T04:39:16,854 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ad5322f9e2642468fc57e207f24ea78, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237552628 2024-12-15T04:39:16,854 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1fe62e2822b469ca648599e5e4b5ef4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237552628 2024-12-15T04:39:16,854 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 55108c236a864db2b5352a8b87bf6cbf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734237553762 2024-12-15T04:39:16,854 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 789b4514c66a472d838c84f451bf32b8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734237553762 2024-12-15T04:39:16,854 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1abfa815e55548d988a14918da2908d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237555888 2024-12-15T04:39:16,854 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f67d91098c1a4154a030a119c7bb3006, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237555888 2024-12-15T04:39:16,860 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:16,861 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:16,861 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/c7220c9ccdd844659a25915ba7fc628b is 50, key is test_row_0/B:col10/1734237555889/Put/seqid=0 2024-12-15T04:39:16,861 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f01ee7d53864474cb944fd32c4d912fc is 50, key is test_row_0/A:col10/1734237555889/Put/seqid=0 2024-12-15T04:39:16,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742102_1278 (size=12207) 2024-12-15T04:39:16,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742103_1279 (size=12207) 2024-12-15T04:39:16,873 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f01ee7d53864474cb944fd32c4d912fc as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f01ee7d53864474cb944fd32c4d912fc 2024-12-15T04:39:16,876 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into f01ee7d53864474cb944fd32c4d912fc(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
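The "Exploring compaction algorithm has selected 3 files of size 36106 ... 1 permutations with 1 in ratio" lines above come from ExploringCompactionPolicy, which scans contiguous windows of store files and keeps a window only if no single file is larger than the configured ratio times the combined size of the others. A rough sketch of that ratio test under simplified assumptions (plain long sizes instead of HStoreFile metadata, default ratio 1.2, example sizes made up to resemble the files above):

```java
import java.util.List;

final class CompactionRatioSketch {
    /**
     * Simplified version of the "in ratio" check used when exploring candidate
     * windows of store files: a window is acceptable only if no single file is
     * larger than ratio * (sum of the other files). The real policy works on
     * HStoreFile objects and evaluates many candidate windows.
     */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three files of roughly 11.7-11.8 K, like the A/B/C stores above, easily
        // pass with the default ratio of 1.2, so the whole window is selected for
        // a minor compaction of all files.
        System.out.println(filesInRatio(List.of(12_050L, 12_028L, 12_028L), 1.2));
    }
}
```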
2024-12-15T04:39:16,877 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:16,877 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=13, startTime=1734237556852; duration=0sec 2024-12-15T04:39:16,877 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:16,877 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:16,877 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:16,878 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:16,878 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:16,878 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,878 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/275eaa3f5ca04c5f86dcc9b394025424, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/a92449404cf74a7fb694f7d0a24c7ee4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/13cd456cd240479f94cb1691dc91b5ed] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.3 K 2024-12-15T04:39:16,878 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 275eaa3f5ca04c5f86dcc9b394025424, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1734237552628 2024-12-15T04:39:16,878 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting a92449404cf74a7fb694f7d0a24c7ee4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734237553762 2024-12-15T04:39:16,879 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13cd456cd240479f94cb1691dc91b5ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237555888 2024-12-15T04:39:16,884 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#C#compaction#227 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:16,884 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ff4dd7e0f5ae45ad9457288bcbf9d150 is 50, key is test_row_0/C:col10/1734237555889/Put/seqid=0 2024-12-15T04:39:16,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742104_1280 (size=12207) 2024-12-15T04:39:16,959 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:16,959 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-12-15T04:39:16,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:16,960 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-15T04:39:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:16,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:16,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/39519790197d42e49dece1c259b34ea0 is 50, key is test_row_0/A:col10/1734237556012/Put/seqid=0 2024-12-15T04:39:16,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742105_1281 
(size=12001) 2024-12-15T04:39:16,967 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/39519790197d42e49dece1c259b34ea0 2024-12-15T04:39:16,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/06e72db66d3d4f198db0d1f9a88f1fd3 is 50, key is test_row_0/B:col10/1734237556012/Put/seqid=0 2024-12-15T04:39:16,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742106_1282 (size=12001) 2024-12-15T04:39:16,976 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/06e72db66d3d4f198db0d1f9a88f1fd3 2024-12-15T04:39:16,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/15883e7afae24e6187af97f5d1374e2d is 50, key is test_row_0/C:col10/1734237556012/Put/seqid=0 2024-12-15T04:39:17,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742107_1283 (size=12001) 2024-12-15T04:39:17,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:17,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:17,136 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237617133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237617134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237617134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237617135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,138 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237617135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237617237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237617238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237617238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237617238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,240 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237617238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,276 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/c7220c9ccdd844659a25915ba7fc628b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c7220c9ccdd844659a25915ba7fc628b 2024-12-15T04:39:17,279 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into c7220c9ccdd844659a25915ba7fc628b(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
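The bursts of RegionTooBusyException above are the region server refusing new writes while the region's memstore is over its blocking limit (512.0 K in this test run); callers are expected to back off and retry once the in-flight flush drains the memstore. A minimal sketch, assuming a standard HBase client Connection/Table and hypothetical row and value contents, of catching the exception and retrying with exponential backoff (the stock client already retries internally, so explicit handling like this is purely illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                               // write accepted
                } catch (RegionTooBusyException e) {
                    // Region is over its memstore blocking limit; wait for the
                    // flush to catch up, then try again with a longer pause.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}
```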
2024-12-15T04:39:17,279 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:17,279 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=13, startTime=1734237556852; duration=0sec 2024-12-15T04:39:17,279 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:17,279 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:17,296 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ff4dd7e0f5ae45ad9457288bcbf9d150 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ff4dd7e0f5ae45ad9457288bcbf9d150 2024-12-15T04:39:17,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T04:39:17,300 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into ff4dd7e0f5ae45ad9457288bcbf9d150(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
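The 512.0 K "memstore limit" in the RegionTooBusyException messages is the region's blocking memstore size, which HBase derives from the flush threshold times a block multiplier (hbase.hregion.memstore.flush.size × hbase.hregion.memstore.block.multiplier); this test evidently runs with a far smaller flush size than the production default of 128 MB. A hedged sketch of that derivation, using the real configuration keys but assumed example values rather than the test's actual settings:

```java
import org.apache.hadoop.conf.Configuration;

final class MemstoreBlockingLimitSketch {
    /**
     * Illustrates how the blocking memstore size behind the RegionTooBusyException
     * messages above is derived: flush threshold times a block multiplier. The
     * property names are the real HBase keys; the example values below are
     * assumptions chosen to reproduce the 512 K limit seen in this log.
     */
    static long blockingMemstoreSize(Configuration conf) {
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        return flushSize * multiplier;
    }

    public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // 128 K (assumed)
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4);
        // 128 K * 4 = 512 K, matching the "Over memstore limit=512.0 K" messages.
        System.out.println(blockingMemstoreSize(conf));
    }
}
```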
2024-12-15T04:39:17,300 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:17,300 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=13, startTime=1734237556852; duration=0sec 2024-12-15T04:39:17,300 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:17,300 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:17,401 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/15883e7afae24e6187af97f5d1374e2d 2024-12-15T04:39:17,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/39519790197d42e49dece1c259b34ea0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/39519790197d42e49dece1c259b34ea0 2024-12-15T04:39:17,409 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/39519790197d42e49dece1c259b34ea0, entries=150, sequenceid=115, filesize=11.7 K 2024-12-15T04:39:17,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/06e72db66d3d4f198db0d1f9a88f1fd3 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/06e72db66d3d4f198db0d1f9a88f1fd3 2024-12-15T04:39:17,413 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/06e72db66d3d4f198db0d1f9a88f1fd3, entries=150, sequenceid=115, filesize=11.7 K 2024-12-15T04:39:17,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/15883e7afae24e6187af97f5d1374e2d 
as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/15883e7afae24e6187af97f5d1374e2d 2024-12-15T04:39:17,419 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/15883e7afae24e6187af97f5d1374e2d, entries=150, sequenceid=115, filesize=11.7 K 2024-12-15T04:39:17,420 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 35d053ddd9bda1d702f0fa4863288c02 in 459ms, sequenceid=115, compaction requested=false 2024-12-15T04:39:17,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:17,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:17,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-15T04:39:17,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-15T04:39:17,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-15T04:39:17,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2230 sec 2024-12-15T04:39:17,424 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.2260 sec 2024-12-15T04:39:17,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:17,441 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-15T04:39:17,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:17,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:17,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:17,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:17,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:17,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:17,447 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/772f37081fd74e60b654863977b4856d is 50, key is test_row_0/A:col10/1734237557441/Put/seqid=0 2024-12-15T04:39:17,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742108_1284 (size=12101) 2024-12-15T04:39:17,452 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/772f37081fd74e60b654863977b4856d 2024-12-15T04:39:17,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237617453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237617453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237617454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237617457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237617457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/adecd13f5a044b0696e385c1ded72957 is 50, key is test_row_0/B:col10/1734237557441/Put/seqid=0 2024-12-15T04:39:17,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742109_1285 (size=12101) 2024-12-15T04:39:17,558 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237617557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237617558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237617558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237617561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237617561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237617760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237617761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237617761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237617762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:17,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237617763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:17,869 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/adecd13f5a044b0696e385c1ded72957 2024-12-15T04:39:17,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/5d9cc14312fe42308b056378aa49cc09 is 50, key is test_row_0/C:col10/1734237557441/Put/seqid=0 2024-12-15T04:39:17,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742110_1286 (size=12101) 2024-12-15T04:39:18,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237618063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237618063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237618064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237618066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237618067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,278 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/5d9cc14312fe42308b056378aa49cc09 2024-12-15T04:39:18,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/772f37081fd74e60b654863977b4856d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/772f37081fd74e60b654863977b4856d 2024-12-15T04:39:18,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/772f37081fd74e60b654863977b4856d, entries=150, sequenceid=135, filesize=11.8 K 2024-12-15T04:39:18,286 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/adecd13f5a044b0696e385c1ded72957 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/adecd13f5a044b0696e385c1ded72957 2024-12-15T04:39:18,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/adecd13f5a044b0696e385c1ded72957, entries=150, sequenceid=135, filesize=11.8 K 2024-12-15T04:39:18,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/5d9cc14312fe42308b056378aa49cc09 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/5d9cc14312fe42308b056378aa49cc09 2024-12-15T04:39:18,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/5d9cc14312fe42308b056378aa49cc09, entries=150, sequenceid=135, filesize=11.8 K 2024-12-15T04:39:18,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 35d053ddd9bda1d702f0fa4863288c02 in 855ms, sequenceid=135, compaction requested=true 2024-12-15T04:39:18,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:18,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:18,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:18,296 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:18,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:18,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:18,296 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:18,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:18,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:18,297 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:18,297 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor 
compaction (all files) 2024-12-15T04:39:18,297 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:18,297 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c7220c9ccdd844659a25915ba7fc628b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/06e72db66d3d4f198db0d1f9a88f1fd3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/adecd13f5a044b0696e385c1ded72957] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.5 K 2024-12-15T04:39:18,297 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:18,297 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor compaction (all files) 2024-12-15T04:39:18,297 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
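The "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources blocking writes while this region's memstore is over its blocking threshold; the flush that finished a few entries earlier ("Finished flush of dataSize ~100.63 KB ... in 855ms") is what relieves that pressure. In HBase the blocking threshold is the region flush size multiplied by the block multiplier. The exact settings this test uses are not visible in this excerpt; the snippet below is only a sketch of how such a limit could be configured, with placeholder values that happen to work out to the 512 K reported here:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Placeholder flush size: 128 KB (production defaults are far larger, typically 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Writes are rejected with RegionTooBusyException once the memstore passes
            // flush.size * multiplier; 128 KB * 4 = 512 KB, the limit reported in this log.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Writes block above roughly " + blockingLimit + " bytes per region");
        }
    }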
2024-12-15T04:39:18,298 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f01ee7d53864474cb944fd32c4d912fc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/39519790197d42e49dece1c259b34ea0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/772f37081fd74e60b654863977b4856d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.5 K 2024-12-15T04:39:18,298 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c7220c9ccdd844659a25915ba7fc628b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237555888 2024-12-15T04:39:18,298 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f01ee7d53864474cb944fd32c4d912fc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237555888 2024-12-15T04:39:18,298 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 06e72db66d3d4f198db0d1f9a88f1fd3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734237556007 2024-12-15T04:39:18,298 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39519790197d42e49dece1c259b34ea0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734237556007 2024-12-15T04:39:18,298 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 772f37081fd74e60b654863977b4856d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734237557130 2024-12-15T04:39:18,298 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting adecd13f5a044b0696e385c1ded72957, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734237557130 2024-12-15T04:39:18,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-15T04:39:18,301 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-12-15T04:39:18,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:18,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-12-15T04:39:18,304 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:18,305 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:18,305 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:18,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-15T04:39:18,310 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:18,310 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/876810db090c4c689c2a5d1717ce5640 is 50, key is test_row_0/B:col10/1734237557441/Put/seqid=0 2024-12-15T04:39:18,312 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#235 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:18,312 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f10853df19204824ac28b02ed8bfbf8d is 50, key is test_row_0/A:col10/1734237557441/Put/seqid=0 2024-12-15T04:39:18,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742111_1287 (size=12409) 2024-12-15T04:39:18,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742112_1288 (size=12409) 2024-12-15T04:39:18,349 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/876810db090c4c689c2a5d1717ce5640 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/876810db090c4c689c2a5d1717ce5640 2024-12-15T04:39:18,354 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into 876810db090c4c689c2a5d1717ce5640(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
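The "Exploring compaction algorithm has selected 3 files of size 36309 ... with 1 in ratio" entries above reflect the size-ratio test the exploring compaction policy applies to each candidate set of store files: a set only qualifies if no single file is larger than the configured ratio times the combined size of the remaining files. The stand-alone check below is a simplified illustration of that test, not the actual HBase implementation; the individual byte counts are invented so that they add up to the 36309 total shown in the log:

    import java.util.List;

    public final class CompactionRatioSketch {
        // True if every file is at most `ratio` times the combined size of the
        // other files in the candidate selection (the "in ratio" condition).
        static boolean filesInRatio(List<Long> fileSizes, double ratio) {
            long total = 0;
            for (long size : fileSizes) {
                total += size;
            }
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the three 11.7-11.9 K store files selected above (36309 bytes in total).
            List<Long> sizes = List.of(12_185L, 11_983L, 12_141L);
            System.out.println(filesInRatio(sizes, 1.2)); // prints true: the selection is in ratio
        }
    }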
2024-12-15T04:39:18,354 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:18,354 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=13, startTime=1734237558296; duration=0sec 2024-12-15T04:39:18,354 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:18,354 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:18,354 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:18,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:18,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:18,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:18,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ff4dd7e0f5ae45ad9457288bcbf9d150, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/15883e7afae24e6187af97f5d1374e2d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/5d9cc14312fe42308b056378aa49cc09] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=35.5 K 2024-12-15T04:39:18,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ff4dd7e0f5ae45ad9457288bcbf9d150, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1734237555888 2024-12-15T04:39:18,357 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 15883e7afae24e6187af97f5d1374e2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1734237556007 2024-12-15T04:39:18,357 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d9cc14312fe42308b056378aa49cc09, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734237557130 2024-12-15T04:39:18,365 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
35d053ddd9bda1d702f0fa4863288c02#C#compaction#236 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:18,365 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/d43fefac83634900807ba422a8b377a7 is 50, key is test_row_0/C:col10/1734237557441/Put/seqid=0 2024-12-15T04:39:18,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742113_1289 (size=12409) 2024-12-15T04:39:18,378 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/d43fefac83634900807ba422a8b377a7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d43fefac83634900807ba422a8b377a7 2024-12-15T04:39:18,384 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into d43fefac83634900807ba422a8b377a7(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:18,384 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:18,384 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=13, startTime=1734237558296; duration=0sec 2024-12-15T04:39:18,384 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:18,384 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:18,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-15T04:39:18,457 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-12-15T04:39:18,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
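Between flushes, every Mutate above that hit the blocked memstore was answered with RegionTooBusyException, and the same rejections continue below while the pid=74 flush is still running. Whether that exception ever reaches application code depends on the client's own retry settings; purely as a hedged sketch (the table, row, column and timings are placeholders taken from the test data, not a prescribed pattern), an application-level backoff-and-retry around a Put could look like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);               // the HBase client also retries internally
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) {
                            throw e;                  // give up after a few attempts
                        }
                        Thread.sleep(backoffMs);      // let the flush drain the memstore
                        backoffMs *= 2;               // simple exponential backoff
                    }
                }
            }
        }
    }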
2024-12-15T04:39:18,458 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-15T04:39:18,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:18,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:18,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:18,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/14ad8a9e44f24c559d49c157a54dd8e3 is 50, key is test_row_0/A:col10/1734237557455/Put/seqid=0 2024-12-15T04:39:18,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742114_1290 (size=12151) 2024-12-15T04:39:18,475 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/14ad8a9e44f24c559d49c157a54dd8e3 2024-12-15T04:39:18,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/c9dfc561c1e14f1da16c1da47b6f1b07 is 50, key is test_row_0/B:col10/1734237557455/Put/seqid=0 2024-12-15T04:39:18,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742115_1291 (size=12151) 2024-12-15T04:39:18,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:18,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
as already flushing 2024-12-15T04:39:18,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237618575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237618576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,578 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237618576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237618577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237618578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-15T04:39:18,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237618679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237618679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237618679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237618679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237618680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,741 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f10853df19204824ac28b02ed8bfbf8d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f10853df19204824ac28b02ed8bfbf8d 2024-12-15T04:39:18,744 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into f10853df19204824ac28b02ed8bfbf8d(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:18,744 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:18,744 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=13, startTime=1734237558296; duration=0sec 2024-12-15T04:39:18,744 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:18,744 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:18,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237618881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237618881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237618881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237618882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:18,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237618882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:18,887 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/c9dfc561c1e14f1da16c1da47b6f1b07 2024-12-15T04:39:18,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/2fb53dbc6c1c43b7b67a3b219586ec36 is 50, key is test_row_0/C:col10/1734237557455/Put/seqid=0 2024-12-15T04:39:18,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742116_1292 (size=12151) 2024-12-15T04:39:18,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-15T04:39:19,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237619184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237619184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237619184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237619185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237619186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,297 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/2fb53dbc6c1c43b7b67a3b219586ec36 2024-12-15T04:39:19,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/14ad8a9e44f24c559d49c157a54dd8e3 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/14ad8a9e44f24c559d49c157a54dd8e3 2024-12-15T04:39:19,303 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/14ad8a9e44f24c559d49c157a54dd8e3, entries=150, sequenceid=156, filesize=11.9 K 2024-12-15T04:39:19,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/c9dfc561c1e14f1da16c1da47b6f1b07 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c9dfc561c1e14f1da16c1da47b6f1b07 2024-12-15T04:39:19,307 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c9dfc561c1e14f1da16c1da47b6f1b07, entries=150, sequenceid=156, filesize=11.9 K 2024-12-15T04:39:19,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/2fb53dbc6c1c43b7b67a3b219586ec36 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/2fb53dbc6c1c43b7b67a3b219586ec36 2024-12-15T04:39:19,311 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/2fb53dbc6c1c43b7b67a3b219586ec36, entries=150, sequenceid=156, filesize=11.9 K 2024-12-15T04:39:19,311 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 35d053ddd9bda1d702f0fa4863288c02 in 853ms, sequenceid=156, compaction requested=false 2024-12-15T04:39:19,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:19,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:19,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-12-15T04:39:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-12-15T04:39:19,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-12-15T04:39:19,314 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0080 sec 2024-12-15T04:39:19,315 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.0110 sec 2024-12-15T04:39:19,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-12-15T04:39:19,409 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-12-15T04:39:19,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:19,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-12-15T04:39:19,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-15T04:39:19,411 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:19,411 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:19,411 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:19,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-15T04:39:19,563 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,563 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-12-15T04:39:19,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:19,563 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-15T04:39:19,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:19,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:19,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:19,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:19,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:19,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:19,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/a115a8306a4847a09b25e4953a355e1e is 50, key is test_row_0/A:col10/1734237558576/Put/seqid=0 2024-12-15T04:39:19,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742117_1293 (size=12151) 
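The rejected Mutate calls before and after this point all fail with RegionTooBusyException ("Over memstore limit=512.0 K") because the region's memstore has grown past its blocking limit while a flush is still in flight; in a stock configuration that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and this test presumably lowers the flush size to force the condition. The HBase client typically retries these failures internally; the sketch below shows an explicit retry-with-backoff around a single Put, with the table name, backoff values, and attempt count chosen purely for illustration.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class PutWithBackoff {
  // Retries a Put that the server rejects with "Over memstore limit"; all values are illustrative.
  static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;                                   // assumed starting backoff
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);
          return;                                             // write accepted
        } catch (RegionTooBusyException e) {                  // region over its memstore blocking limit
          Thread.sleep(backoffMs);                            // wait for the in-flight flush to drain
          backoffMs = Math.min(backoffMs * 2, 5_000);         // exponential backoff, capped at 5 s
        }
      }
      throw new IOException("region still too busy after 10 attempts");
    }
  }
}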
2024-12-15T04:39:19,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:19,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:19,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237619698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237619699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237619699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237619700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237619700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-15T04:39:19,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237619802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237619802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237619802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237619803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:19,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237619803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:19,970 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/a115a8306a4847a09b25e4953a355e1e 2024-12-15T04:39:19,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8018c73d683c43b29d5d6ed0f417adaf is 50, key is test_row_0/B:col10/1734237558576/Put/seqid=0 2024-12-15T04:39:19,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742118_1294 (size=12151) 2024-12-15T04:39:20,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237620004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,006 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237620004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237620005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237620005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,006 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237620005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-15T04:39:20,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237620306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,307 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237620306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237620307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237620308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237620308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,379 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8018c73d683c43b29d5d6ed0f417adaf 2024-12-15T04:39:20,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ccc0c76ac6274f0284596fa5763b211b is 50, key is test_row_0/C:col10/1734237558576/Put/seqid=0 2024-12-15T04:39:20,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742119_1295 (size=12151) 2024-12-15T04:39:20,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-15T04:39:20,789 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ccc0c76ac6274f0284596fa5763b211b 2024-12-15T04:39:20,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/a115a8306a4847a09b25e4953a355e1e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/a115a8306a4847a09b25e4953a355e1e 2024-12-15T04:39:20,796 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/a115a8306a4847a09b25e4953a355e1e, entries=150, sequenceid=174, filesize=11.9 K 2024-12-15T04:39:20,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8018c73d683c43b29d5d6ed0f417adaf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8018c73d683c43b29d5d6ed0f417adaf 2024-12-15T04:39:20,800 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8018c73d683c43b29d5d6ed0f417adaf, entries=150, sequenceid=174, filesize=11.9 K 2024-12-15T04:39:20,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ccc0c76ac6274f0284596fa5763b211b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ccc0c76ac6274f0284596fa5763b211b 2024-12-15T04:39:20,806 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ccc0c76ac6274f0284596fa5763b211b, entries=150, sequenceid=174, filesize=11.9 K 2024-12-15T04:39:20,807 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 35d053ddd9bda1d702f0fa4863288c02 in 1244ms, sequenceid=174, compaction requested=true 2024-12-15T04:39:20,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:20,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:20,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-12-15T04:39:20,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-12-15T04:39:20,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-15T04:39:20,809 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3970 sec 2024-12-15T04:39:20,810 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.3990 sec 2024-12-15T04:39:20,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:20,811 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-15T04:39:20,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:20,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:20,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:20,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:20,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:20,811 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:20,815 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/0133f167435648e1b80705964ce06bfc is 50, key is test_row_0/A:col10/1734237560810/Put/seqid=0 2024-12-15T04:39:20,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742120_1296 (size=12151) 2024-12-15T04:39:20,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237620819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237620822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237620822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237620823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237620823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237620923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237620925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237620925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237620925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:20,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:20,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237620925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237621125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,128 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237621127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237621128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237621128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237621128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/0133f167435648e1b80705964ce06bfc 2024-12-15T04:39:21,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/6fbb9d4539c047478dfaafdbe7938712 is 50, key is test_row_0/B:col10/1734237560810/Put/seqid=0 2024-12-15T04:39:21,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742121_1297 (size=12151) 2024-12-15T04:39:21,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237621428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237621429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237621430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237621430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,433 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237621431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-12-15T04:39:21,514 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-12-15T04:39:21,515 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-12-15T04:39:21,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-15T04:39:21,516 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:21,517 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:21,517 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:21,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=77 2024-12-15T04:39:21,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/6fbb9d4539c047478dfaafdbe7938712 2024-12-15T04:39:21,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ab02d1ce0abc4af9be09cc6d0892ebed is 50, key is test_row_0/C:col10/1734237560810/Put/seqid=0 2024-12-15T04:39:21,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742122_1298 (size=12151) 2024-12-15T04:39:21,640 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ab02d1ce0abc4af9be09cc6d0892ebed 2024-12-15T04:39:21,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/0133f167435648e1b80705964ce06bfc as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0133f167435648e1b80705964ce06bfc 2024-12-15T04:39:21,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0133f167435648e1b80705964ce06bfc, entries=150, sequenceid=194, filesize=11.9 K 2024-12-15T04:39:21,647 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/6fbb9d4539c047478dfaafdbe7938712 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/6fbb9d4539c047478dfaafdbe7938712 2024-12-15T04:39:21,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/6fbb9d4539c047478dfaafdbe7938712, entries=150, sequenceid=194, filesize=11.9 K 2024-12-15T04:39:21,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/ab02d1ce0abc4af9be09cc6d0892ebed as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ab02d1ce0abc4af9be09cc6d0892ebed 2024-12-15T04:39:21,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ab02d1ce0abc4af9be09cc6d0892ebed, entries=150, sequenceid=194, filesize=11.9 K 2024-12-15T04:39:21,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 35d053ddd9bda1d702f0fa4863288c02 in 844ms, sequenceid=194, compaction requested=true 2024-12-15T04:39:21,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:21,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:21,655 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:21,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:21,656 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:39:21,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:21,656 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:39:21,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:21,656 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:21,656 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:39:21,656 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:39:21,656 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor compaction (all files) 2024-12-15T04:39:21,656 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor compaction (all files) 2024-12-15T04:39:21,657 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
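The repeated RegionTooBusyException entries in this stretch of the log are the region server refusing Mutate calls once the region's memstore passes its blocking limit (reported here as 512.0 K); the test run evidently uses a very small flush size, so the limit is reached almost immediately under concurrent writers. As a rough, hedged sketch (not taken from the test's actual setup), the two standard properties that determine that blocking limit can be set on an HBase Configuration as below; the 128 KB flush size and multiplier of 4 are illustrative assumptions chosen only because they would produce the 512 K figure seen above.

// Hedged sketch: illustrates how a "Over memstore limit=512.0 K" threshold could
// arise (flush size * block multiplier); values are assumptions, not the test's.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches ~128 KB (the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore reaches 4x the flush size (~512 KB);
    // at that point the server throws RegionTooBusyException, as in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}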
2024-12-15T04:39:21,657 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:21,657 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f10853df19204824ac28b02ed8bfbf8d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/14ad8a9e44f24c559d49c157a54dd8e3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/a115a8306a4847a09b25e4953a355e1e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0133f167435648e1b80705964ce06bfc] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=47.7 K 2024-12-15T04:39:21,657 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/876810db090c4c689c2a5d1717ce5640, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c9dfc561c1e14f1da16c1da47b6f1b07, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8018c73d683c43b29d5d6ed0f417adaf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/6fbb9d4539c047478dfaafdbe7938712] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=47.7 K 2024-12-15T04:39:21,657 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f10853df19204824ac28b02ed8bfbf8d, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734237557130 2024-12-15T04:39:21,657 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 876810db090c4c689c2a5d1717ce5640, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734237557130 2024-12-15T04:39:21,657 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14ad8a9e44f24c559d49c157a54dd8e3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734237557451 2024-12-15T04:39:21,657 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c9dfc561c1e14f1da16c1da47b6f1b07, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734237557451 2024-12-15T04:39:21,658 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting a115a8306a4847a09b25e4953a355e1e, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237558572 2024-12-15T04:39:21,658 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 8018c73d683c43b29d5d6ed0f417adaf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1734237558572 2024-12-15T04:39:21,658 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fbb9d4539c047478dfaafdbe7938712, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734237559698 2024-12-15T04:39:21,658 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0133f167435648e1b80705964ce06bfc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734237559698 2024-12-15T04:39:21,666 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#247 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:21,667 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/d6a640e6e1d14dc8a0e53ee44e3296c1 is 50, key is test_row_0/B:col10/1734237560810/Put/seqid=0 2024-12-15T04:39:21,668 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,668 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#246 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:21,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-12-15T04:39:21,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
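The FLUSH procedure entries above (pid=75, 77, 78 and the FlushRegionCallable dispatch) come from a client asking the master to flush TestAcidGuarantees while the writers keep running. A minimal, hedged sketch of issuing such a flush from client code with the standard Admin API; the table name is taken from the log, the connection setup is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; on the master this
      // appears as a FlushTableProcedure, like pid=77 in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}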
2024-12-15T04:39:21,668 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/494c49b6b0d74862a0b037d94c97fbb6 is 50, key is test_row_0/A:col10/1734237560810/Put/seqid=0 2024-12-15T04:39:21,668 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-15T04:39:21,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:21,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:21,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:21,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:21,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:21,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:21,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742123_1299 (size=12595) 2024-12-15T04:39:21,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742124_1300 (size=12595) 2024-12-15T04:39:21,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/1521474aa25b4731b70138a0dd0be0d6 is 50, key is test_row_0/A:col10/1734237560821/Put/seqid=0 2024-12-15T04:39:21,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742125_1301 (size=12151) 2024-12-15T04:39:21,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-15T04:39:21,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:21,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
as already flushing 2024-12-15T04:39:21,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237621965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237621966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237621970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237621970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:21,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:21,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237621970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237622071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237622071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237622074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237622074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237622074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,089 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/d6a640e6e1d14dc8a0e53ee44e3296c1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d6a640e6e1d14dc8a0e53ee44e3296c1 2024-12-15T04:39:22,093 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into d6a640e6e1d14dc8a0e53ee44e3296c1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:22,093 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:22,093 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=12, startTime=1734237561655; duration=0sec 2024-12-15T04:39:22,093 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:22,093 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:22,093 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:39:22,094 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:39:22,094 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:22,094 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:22,094 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d43fefac83634900807ba422a8b377a7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/2fb53dbc6c1c43b7b67a3b219586ec36, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ccc0c76ac6274f0284596fa5763b211b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ab02d1ce0abc4af9be09cc6d0892ebed] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=47.7 K 2024-12-15T04:39:22,095 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d43fefac83634900807ba422a8b377a7, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1734237557130 2024-12-15T04:39:22,095 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 2fb53dbc6c1c43b7b67a3b219586ec36, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1734237557451 2024-12-15T04:39:22,095 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ccc0c76ac6274f0284596fa5763b211b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=174, earliestPutTs=1734237558572 2024-12-15T04:39:22,095 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ab02d1ce0abc4af9be09cc6d0892ebed, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734237559698 2024-12-15T04:39:22,096 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/494c49b6b0d74862a0b037d94c97fbb6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/494c49b6b0d74862a0b037d94c97fbb6 2024-12-15T04:39:22,097 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/1521474aa25b4731b70138a0dd0be0d6 2024-12-15T04:39:22,101 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into 494c49b6b0d74862a0b037d94c97fbb6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:22,101 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:22,101 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=12, startTime=1734237561655; duration=0sec 2024-12-15T04:39:22,101 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:22,101 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:22,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/cc918cfc8db54efc8c2868631653b4b7 is 50, key is test_row_0/B:col10/1734237560821/Put/seqid=0 2024-12-15T04:39:22,107 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#C#compaction#250 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:22,107 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/9c7a557cfe764654aa578fcbe5f19318 is 50, key is test_row_0/C:col10/1734237560810/Put/seqid=0 2024-12-15T04:39:22,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742127_1303 (size=12595) 2024-12-15T04:39:22,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742126_1302 (size=12151) 2024-12-15T04:39:22,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-15T04:39:22,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237622273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237622274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237622277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,280 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237622277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237622277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,518 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/cc918cfc8db54efc8c2868631653b4b7 2024-12-15T04:39:22,522 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/9c7a557cfe764654aa578fcbe5f19318 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/9c7a557cfe764654aa578fcbe5f19318 2024-12-15T04:39:22,526 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into 9c7a557cfe764654aa578fcbe5f19318(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:22,526 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:22,526 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=12, startTime=1734237561656; duration=0sec 2024-12-15T04:39:22,526 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:22,526 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:22,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8243248267b54bd58186c9494f784564 is 50, key is test_row_0/C:col10/1734237560821/Put/seqid=0 2024-12-15T04:39:22,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742128_1304 (size=12151) 2024-12-15T04:39:22,576 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237622575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237622577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237622580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237622580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:22,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237622582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:22,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-15T04:39:22,956 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8243248267b54bd58186c9494f784564 2024-12-15T04:39:22,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/1521474aa25b4731b70138a0dd0be0d6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1521474aa25b4731b70138a0dd0be0d6 2024-12-15T04:39:22,962 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1521474aa25b4731b70138a0dd0be0d6, entries=150, sequenceid=211, filesize=11.9 K 2024-12-15T04:39:22,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/cc918cfc8db54efc8c2868631653b4b7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/cc918cfc8db54efc8c2868631653b4b7 2024-12-15T04:39:22,965 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/cc918cfc8db54efc8c2868631653b4b7, entries=150, sequenceid=211, filesize=11.9 K 2024-12-15T04:39:22,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8243248267b54bd58186c9494f784564 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8243248267b54bd58186c9494f784564 2024-12-15T04:39:22,969 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8243248267b54bd58186c9494f784564, entries=150, sequenceid=211, filesize=11.9 K 2024-12-15T04:39:22,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-12-15T04:39:22,970 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 35d053ddd9bda1d702f0fa4863288c02 in 1302ms, sequenceid=211, compaction requested=false 2024-12-15T04:39:22,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:22,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:22,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-12-15T04:39:22,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-12-15T04:39:22,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-15T04:39:22,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4540 sec 2024-12-15T04:39:22,973 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.4570 sec 2024-12-15T04:39:23,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:23,080 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-15T04:39:23,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:23,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:23,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:23,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:23,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:23,080 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:23,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/7b4c5b05df3443e3abc4dc9759b6e758 is 50, key is test_row_0/A:col10/1734237561940/Put/seqid=0 2024-12-15T04:39:23,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742129_1305 (size=12151) 2024-12-15T04:39:23,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237623089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,091 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237623090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237623090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237623090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237623091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237623192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237623192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237623193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237623193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237623193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,395 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237623394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237623394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237623394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237623395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,396 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237623395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,487 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/7b4c5b05df3443e3abc4dc9759b6e758 2024-12-15T04:39:23,492 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/103882d1f43c4bcc8a5a3dcbfdfa566a is 50, key is test_row_0/B:col10/1734237561940/Put/seqid=0 2024-12-15T04:39:23,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742130_1306 (size=12151) 2024-12-15T04:39:23,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-12-15T04:39:23,620 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-12-15T04:39:23,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:23,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-12-15T04:39:23,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-15T04:39:23,622 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:23,622 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=79, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:23,622 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:23,698 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237623698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237623698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,699 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237623698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237623700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237623701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-15T04:39:23,773 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-15T04:39:23,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:23,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:23,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:23,774 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
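The entries above show the master storing FlushTableProcedure pid=79, dispatching FlushRegionCallable pid=80 to the region server, and the callable failing with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 is still mid-flush; the later "Executing remote procedure ... pid=80" entry shows the master re-dispatching it. The "Operation: FLUSH ... procId: 77 completed" line suggests the test requests these flushes through the Admin API. A minimal sketch of such a request follows, assuming the standard HBase 2.x client API; only the table name is taken from the log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Submits a flush-table procedure on the master and waits for it to
      // finish, matching the "Operation: FLUSH, Table Name:
      // default:TestAcidGuarantees" entries logged by HBaseAdmin$TableFuture.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}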
2024-12-15T04:39:23,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:23,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:23,895 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/103882d1f43c4bcc8a5a3dcbfdfa566a 2024-12-15T04:39:23,901 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8ddf7443c5e945e9b40983708be741cd is 50, key is test_row_0/C:col10/1734237561940/Put/seqid=0 2024-12-15T04:39:23,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742131_1307 (size=12151) 2024-12-15T04:39:23,904 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8ddf7443c5e945e9b40983708be741cd 2024-12-15T04:39:23,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/7b4c5b05df3443e3abc4dc9759b6e758 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/7b4c5b05df3443e3abc4dc9759b6e758 2024-12-15T04:39:23,910 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/7b4c5b05df3443e3abc4dc9759b6e758, entries=150, sequenceid=235, filesize=11.9 K 2024-12-15T04:39:23,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/103882d1f43c4bcc8a5a3dcbfdfa566a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/103882d1f43c4bcc8a5a3dcbfdfa566a 2024-12-15T04:39:23,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/103882d1f43c4bcc8a5a3dcbfdfa566a, entries=150, sequenceid=235, filesize=11.9 K 2024-12-15T04:39:23,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8ddf7443c5e945e9b40983708be741cd as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8ddf7443c5e945e9b40983708be741cd 2024-12-15T04:39:23,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8ddf7443c5e945e9b40983708be741cd, entries=150, sequenceid=235, filesize=11.9 K 2024-12-15T04:39:23,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 35d053ddd9bda1d702f0fa4863288c02 in 839ms, sequenceid=235, compaction requested=true 2024-12-15T04:39:23,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:23,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:23,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:23,919 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:23,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:23,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:23,919 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:23,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:23,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:23,922 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:23,922 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor compaction (all files) 2024-12-15T04:39:23,922 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
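The repeated RegionTooBusyException rejections above all originate in HRegion.checkResources, which blocks updates once the region's memstore exceeds its blocking limit; that limit is the configured memstore flush size multiplied by the block multiplier, so the "Over memstore limit=512.0 K" figure implies a deliberately small flush size for this test. A hedged sketch of the relevant settings follows; the property names are standard HBase keys, but the concrete values are only one illustrative combination that would yield the 512 K limit seen here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a region flush is requested.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // checkResources() rejects updates with RegionTooBusyException once the
    // memstore reaches flush.size * multiplier (128 K * 4 = 512 K here).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}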
2024-12-15T04:39:23,922 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/494c49b6b0d74862a0b037d94c97fbb6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1521474aa25b4731b70138a0dd0be0d6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/7b4c5b05df3443e3abc4dc9759b6e758] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.0 K 2024-12-15T04:39:23,923 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:23,923 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 494c49b6b0d74862a0b037d94c97fbb6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734237559698 2024-12-15T04:39:23,923 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor compaction (all files) 2024-12-15T04:39:23,923 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:23,923 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d6a640e6e1d14dc8a0e53ee44e3296c1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/cc918cfc8db54efc8c2868631653b4b7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/103882d1f43c4bcc8a5a3dcbfdfa566a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.0 K 2024-12-15T04:39:23,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-15T04:39:23,923 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1521474aa25b4731b70138a0dd0be0d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237560818 2024-12-15T04:39:23,923 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d6a640e6e1d14dc8a0e53ee44e3296c1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734237559698 2024-12-15T04:39:23,923 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b4c5b05df3443e3abc4dc9759b6e758, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734237561940 2024-12-15T04:39:23,925 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting cc918cfc8db54efc8c2868631653b4b7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237560818 2024-12-15T04:39:23,926 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 103882d1f43c4bcc8a5a3dcbfdfa566a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734237561940 2024-12-15T04:39:23,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:23,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-12-15T04:39:23,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
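The shortCompactions and longCompactions entries above show SortedCompactionPolicy and ExploringCompactionPolicy selecting all three eligible HFiles per store (A, B, C) for a minor compaction, with flushes blocking at 16 store files. The sketch below names the standard configuration knobs behind those numbers; the values shown are the defaults this run appears to be using, not settings confirmed by the test itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionSketch {
  public static Configuration conf() {
    Configuration conf = HBaseConfiguration.create();
    // "Selecting compaction from 3 store files, 0 compacting, 3 eligible":
    // three eligible files meet the minimum and are all picked.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on files merged in a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // "16 blocking": flushes stall once a store accumulates this many files.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}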
2024-12-15T04:39:23,926 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:39:23,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:23,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:23,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:23,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:23,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:23,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:23,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/2504bc65377d4a35ab784abf0435dff8 is 50, key is test_row_0/A:col10/1734237563089/Put/seqid=0 2024-12-15T04:39:23,935 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#256 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:23,935 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#257 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:23,936 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/5dbab4b7afd94d11843d9f46c6471ecd is 50, key is test_row_0/B:col10/1734237561940/Put/seqid=0 2024-12-15T04:39:23,936 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ab9bb3552cf54143bb4ef0af94d2a02b is 50, key is test_row_0/A:col10/1734237561940/Put/seqid=0 2024-12-15T04:39:23,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742132_1308 (size=9757) 2024-12-15T04:39:23,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742134_1310 (size=12697) 2024-12-15T04:39:23,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742133_1309 (size=12697) 2024-12-15T04:39:23,945 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/5dbab4b7afd94d11843d9f46c6471ecd as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/5dbab4b7afd94d11843d9f46c6471ecd 2024-12-15T04:39:23,949 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into 5dbab4b7afd94d11843d9f46c6471ecd(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
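Even after the flush and the B-store compaction complete, the handlers at 04:39:24 below keep rejecting Mutate calls with the same memstore-limit exception. The stock HBase client treats RegionTooBusyException as retryable and retries it internally, so the sketch below is only a hypothetical helper that makes that behaviour explicit; it is not code from this test, and the table name and backoff values are assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class BusyRegionRetrySketch {
  // Hypothetical helper: retries a Put that the region server rejected
  // because its memstore is over the blocking limit.
  public static void putWithBackoff(Connection connection, Put put)
      throws IOException, InterruptedException {
    try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          // Same condition as the "Over memstore limit=512.0 K" rejections.
          Thread.sleep(backoffMs);
          backoffMs *= 2; // back off while MemStoreFlusher.0 catches up
        }
      }
      table.put(put); // last attempt; let RegionTooBusyException propagate
    }
  }
}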
2024-12-15T04:39:23,949 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:23,949 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=13, startTime=1734237563919; duration=0sec 2024-12-15T04:39:23,949 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:23,949 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:23,949 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:23,950 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:23,950 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:23,950 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:23,950 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/9c7a557cfe764654aa578fcbe5f19318, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8243248267b54bd58186c9494f784564, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8ddf7443c5e945e9b40983708be741cd] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.0 K 2024-12-15T04:39:23,951 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c7a557cfe764654aa578fcbe5f19318, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1734237559698 2024-12-15T04:39:23,951 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 8243248267b54bd58186c9494f784564, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237560818 2024-12-15T04:39:23,951 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ddf7443c5e945e9b40983708be741cd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734237561940 2024-12-15T04:39:23,957 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
35d053ddd9bda1d702f0fa4863288c02#C#compaction#258 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:23,957 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0e11571deb1c4ea3a9186ad8d0bce206 is 50, key is test_row_0/C:col10/1734237561940/Put/seqid=0 2024-12-15T04:39:23,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742135_1311 (size=12697) 2024-12-15T04:39:23,966 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0e11571deb1c4ea3a9186ad8d0bce206 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e11571deb1c4ea3a9186ad8d0bce206 2024-12-15T04:39:23,970 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into 0e11571deb1c4ea3a9186ad8d0bce206(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:23,970 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:23,970 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=13, startTime=1734237563919; duration=0sec 2024-12-15T04:39:23,970 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:23,970 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:24,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:24,202 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:24,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237624211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237624212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237624213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237624213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237624213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-15T04:39:24,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237624314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237624314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237624315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237624315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,316 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237624315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,339 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/2504bc65377d4a35ab784abf0435dff8 2024-12-15T04:39:24,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/f3d9599ce8984889a4eb48069bd067e8 is 50, key is test_row_0/B:col10/1734237563089/Put/seqid=0 2024-12-15T04:39:24,345 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ab9bb3552cf54143bb4ef0af94d2a02b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ab9bb3552cf54143bb4ef0af94d2a02b 2024-12-15T04:39:24,350 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into ab9bb3552cf54143bb4ef0af94d2a02b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:24,350 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:24,350 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=13, startTime=1734237563919; duration=0sec 2024-12-15T04:39:24,350 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:24,350 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:24,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742136_1312 (size=9757) 2024-12-15T04:39:24,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237624517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,518 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237624517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237624517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237624518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237624519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-15T04:39:24,753 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/f3d9599ce8984889a4eb48069bd067e8 2024-12-15T04:39:24,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0dac446ff3dd4d20a614b6067cd58f7c is 50, key is test_row_0/C:col10/1734237563089/Put/seqid=0 2024-12-15T04:39:24,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742137_1313 (size=9757) 2024-12-15T04:39:24,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237624820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237624820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237624820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237624821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:24,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237624821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,163 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0dac446ff3dd4d20a614b6067cd58f7c 2024-12-15T04:39:25,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/2504bc65377d4a35ab784abf0435dff8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/2504bc65377d4a35ab784abf0435dff8 2024-12-15T04:39:25,169 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/2504bc65377d4a35ab784abf0435dff8, entries=100, sequenceid=250, filesize=9.5 K 2024-12-15T04:39:25,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/f3d9599ce8984889a4eb48069bd067e8 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/f3d9599ce8984889a4eb48069bd067e8 2024-12-15T04:39:25,172 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/f3d9599ce8984889a4eb48069bd067e8, entries=100, sequenceid=250, filesize=9.5 K 2024-12-15T04:39:25,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0dac446ff3dd4d20a614b6067cd58f7c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0dac446ff3dd4d20a614b6067cd58f7c 2024-12-15T04:39:25,176 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0dac446ff3dd4d20a614b6067cd58f7c, entries=100, sequenceid=250, filesize=9.5 K 2024-12-15T04:39:25,177 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 35d053ddd9bda1d702f0fa4863288c02 in 1251ms, sequenceid=250, compaction requested=false 2024-12-15T04:39:25,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:25,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:25,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-12-15T04:39:25,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-12-15T04:39:25,181 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-12-15T04:39:25,181 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5570 sec 2024-12-15T04:39:25,182 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.5610 sec 2024-12-15T04:39:25,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:25,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-15T04:39:25,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:25,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:25,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:25,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:25,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:25,326 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:25,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/19a5614622364abb93ca9d5139d35c50 is 50, key is test_row_0/A:col10/1734237565324/Put/seqid=0 2024-12-15T04:39:25,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742138_1314 (size=12301) 2024-12-15T04:39:25,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237625336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237625336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237625337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237625336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,339 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237625337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237625440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237625440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237625440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237625440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237625440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237625641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237625641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237625642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237625642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237625642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-12-15T04:39:25,725 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-12-15T04:39:25,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-12-15T04:39:25,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T04:39:25,727 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:25,728 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:25,728 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:25,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/19a5614622364abb93ca9d5139d35c50 2024-12-15T04:39:25,738 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/25c77a7565c54b71b66b3edc9dfb8fd1 is 50, key is test_row_0/B:col10/1734237565324/Put/seqid=0 2024-12-15T04:39:25,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742139_1315 (size=12301) 
2024-12-15T04:39:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T04:39:25,879 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,879 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-15T04:39:25,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:25,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:25,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:25,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:25,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:25,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237625944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237625944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237625945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237625945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:25,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:25,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237625946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T04:39:26,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-15T04:39:26,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:26,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,142 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/25c77a7565c54b71b66b3edc9dfb8fd1 2024-12-15T04:39:26,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/e836ac86903c4ae2849834e8b4693a80 is 50, key is test_row_0/C:col10/1734237565324/Put/seqid=0 2024-12-15T04:39:26,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742140_1316 (size=12301) 2024-12-15T04:39:26,183 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-15T04:39:26,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
as already flushing 2024-12-15T04:39:26,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,184 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T04:39:26,336 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,336 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-15T04:39:26,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:26,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,336 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,447 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:26,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237626446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:26,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237626447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,448 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:26,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237626447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:26,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237626448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:26,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237626452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,488 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-15T04:39:26,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:26,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:26,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/e836ac86903c4ae2849834e8b4693a80 2024-12-15T04:39:26,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/19a5614622364abb93ca9d5139d35c50 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/19a5614622364abb93ca9d5139d35c50 2024-12-15T04:39:26,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/19a5614622364abb93ca9d5139d35c50, entries=150, sequenceid=275, filesize=12.0 K 2024-12-15T04:39:26,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/25c77a7565c54b71b66b3edc9dfb8fd1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/25c77a7565c54b71b66b3edc9dfb8fd1 2024-12-15T04:39:26,560 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/25c77a7565c54b71b66b3edc9dfb8fd1, entries=150, 
sequenceid=275, filesize=12.0 K 2024-12-15T04:39:26,561 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/e836ac86903c4ae2849834e8b4693a80 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e836ac86903c4ae2849834e8b4693a80 2024-12-15T04:39:26,564 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e836ac86903c4ae2849834e8b4693a80, entries=150, sequenceid=275, filesize=12.0 K 2024-12-15T04:39:26,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 35d053ddd9bda1d702f0fa4863288c02 in 1239ms, sequenceid=275, compaction requested=true 2024-12-15T04:39:26,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:26,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:26,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:26,565 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:26,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:26,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:26,565 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:26,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:26,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:26,565 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:26,565 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:26,566 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] 
regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor compaction (all files) 2024-12-15T04:39:26,566 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor compaction (all files) 2024-12-15T04:39:26,566 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,566 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ab9bb3552cf54143bb4ef0af94d2a02b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/2504bc65377d4a35ab784abf0435dff8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/19a5614622364abb93ca9d5139d35c50] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=33.9 K 2024-12-15T04:39:26,566 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:26,566 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/5dbab4b7afd94d11843d9f46c6471ecd, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/f3d9599ce8984889a4eb48069bd067e8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/25c77a7565c54b71b66b3edc9dfb8fd1] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=33.9 K 2024-12-15T04:39:26,566 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ab9bb3552cf54143bb4ef0af94d2a02b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734237561940 2024-12-15T04:39:26,566 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dbab4b7afd94d11843d9f46c6471ecd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734237561940 2024-12-15T04:39:26,566 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2504bc65377d4a35ab784abf0435dff8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1734237563089 2024-12-15T04:39:26,566 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting f3d9599ce8984889a4eb48069bd067e8, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1734237563089 
2024-12-15T04:39:26,566 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 25c77a7565c54b71b66b3edc9dfb8fd1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734237564210 2024-12-15T04:39:26,567 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19a5614622364abb93ca9d5139d35c50, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734237564210 2024-12-15T04:39:26,573 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#264 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:26,573 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/75b9a43c624f4147948b55c260c20d45 is 50, key is test_row_0/B:col10/1734237565324/Put/seqid=0 2024-12-15T04:39:26,573 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#265 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:26,574 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/5c8643fb1b4e4e9596cd59235c4f5285 is 50, key is test_row_0/A:col10/1734237565324/Put/seqid=0 2024-12-15T04:39:26,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742141_1317 (size=12949) 2024-12-15T04:39:26,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742142_1318 (size=12949) 2024-12-15T04:39:26,640 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:26,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-12-15T04:39:26,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:26,641 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:39:26,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:26,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:26,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:26,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:26,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:26,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:26,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/be449ed8543048b2b8c40c29bcda7ffa is 50, key is test_row_0/A:col10/1734237565336/Put/seqid=0 2024-12-15T04:39:26,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742143_1319 (size=12301) 2024-12-15T04:39:26,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T04:39:26,989 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/5c8643fb1b4e4e9596cd59235c4f5285 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/5c8643fb1b4e4e9596cd59235c4f5285 2024-12-15T04:39:26,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/75b9a43c624f4147948b55c260c20d45 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/75b9a43c624f4147948b55c260c20d45 2024-12-15T04:39:26,993 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into 
75b9a43c624f4147948b55c260c20d45(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:26,993 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into 5c8643fb1b4e4e9596cd59235c4f5285(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:26,993 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:26,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:26,993 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=13, startTime=1734237566565; duration=0sec 2024-12-15T04:39:26,993 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=13, startTime=1734237566565; duration=0sec 2024-12-15T04:39:26,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:26,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:26,993 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:26,993 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:26,993 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:26,994 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:26,994 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:26,994 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:26,994 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e11571deb1c4ea3a9186ad8d0bce206, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0dac446ff3dd4d20a614b6067cd58f7c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e836ac86903c4ae2849834e8b4693a80] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=33.9 K 2024-12-15T04:39:26,995 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e11571deb1c4ea3a9186ad8d0bce206, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1734237561940 2024-12-15T04:39:26,995 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0dac446ff3dd4d20a614b6067cd58f7c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1734237563089 2024-12-15T04:39:26,995 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting e836ac86903c4ae2849834e8b4693a80, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734237564210 2024-12-15T04:39:27,000 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#C#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:27,001 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/acc6ca55612644c1bacc14df7f884733 is 50, key is test_row_0/C:col10/1734237565324/Put/seqid=0 2024-12-15T04:39:27,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742144_1320 (size=12949) 2024-12-15T04:39:27,053 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/be449ed8543048b2b8c40c29bcda7ffa 2024-12-15T04:39:27,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/306a323d8ca44b70ae0ae5cd646c56c1 is 50, key is test_row_0/B:col10/1734237565336/Put/seqid=0 2024-12-15T04:39:27,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742145_1321 (size=12301) 2024-12-15T04:39:27,411 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/acc6ca55612644c1bacc14df7f884733 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/acc6ca55612644c1bacc14df7f884733 2024-12-15T04:39:27,415 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into acc6ca55612644c1bacc14df7f884733(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:27,415 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:27,415 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=13, startTime=1734237566565; duration=0sec 2024-12-15T04:39:27,415 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:27,415 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:27,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:27,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:27,464 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/306a323d8ca44b70ae0ae5cd646c56c1 2024-12-15T04:39:27,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237627464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,467 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237627465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,468 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237627466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237627467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237627468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/e8d80737dc6340d4bf36943962ef4f38 is 50, key is test_row_0/C:col10/1734237565336/Put/seqid=0 2024-12-15T04:39:27,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742146_1322 (size=12301) 2024-12-15T04:39:27,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237627568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237627568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237627569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237627570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237627571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,770 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237627770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237627770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237627772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,773 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237627772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:27,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237627774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:27,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T04:39:27,878 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=290 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/e8d80737dc6340d4bf36943962ef4f38 2024-12-15T04:39:27,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/be449ed8543048b2b8c40c29bcda7ffa as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/be449ed8543048b2b8c40c29bcda7ffa 2024-12-15T04:39:27,913 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/be449ed8543048b2b8c40c29bcda7ffa, entries=150, sequenceid=290, filesize=12.0 K 2024-12-15T04:39:27,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/306a323d8ca44b70ae0ae5cd646c56c1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/306a323d8ca44b70ae0ae5cd646c56c1 2024-12-15T04:39:27,917 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/306a323d8ca44b70ae0ae5cd646c56c1, entries=150, sequenceid=290, filesize=12.0 K 2024-12-15T04:39:27,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/e8d80737dc6340d4bf36943962ef4f38 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e8d80737dc6340d4bf36943962ef4f38 2024-12-15T04:39:27,922 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e8d80737dc6340d4bf36943962ef4f38, entries=150, sequenceid=290, filesize=12.0 K 2024-12-15T04:39:27,923 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 35d053ddd9bda1d702f0fa4863288c02 in 1282ms, sequenceid=290, compaction requested=false 2024-12-15T04:39:27,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:27,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:27,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-12-15T04:39:27,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-12-15T04:39:27,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-12-15T04:39:27,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1960 sec 2024-12-15T04:39:27,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 2.2000 sec 2024-12-15T04:39:28,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:28,075 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:39:28,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:28,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:28,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:28,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:28,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:28,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:28,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/55b206e3371d4c5c84e8b92e164846f5 is 50, key is test_row_0/A:col10/1734237568075/Put/seqid=0 2024-12-15T04:39:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237628079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237628079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742147_1323 (size=12301) 2024-12-15T04:39:28,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237628081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237628081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237628082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237628182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237628182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237628184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237628184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237628185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237628384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237628385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237628385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237628386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237628387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/55b206e3371d4c5c84e8b92e164846f5 2024-12-15T04:39:28,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/825050e6cc104c569cd319fe44745d00 is 50, key is test_row_0/B:col10/1734237568075/Put/seqid=0 2024-12-15T04:39:28,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742148_1324 (size=12301) 2024-12-15T04:39:28,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237628687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237628688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237628688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237628689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:28,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237628690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:28,893 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/825050e6cc104c569cd319fe44745d00 2024-12-15T04:39:28,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0e49508d6d304f43b1c552c8ed41e09f is 50, key is test_row_0/C:col10/1734237568075/Put/seqid=0 2024-12-15T04:39:28,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742149_1325 (size=12301) 2024-12-15T04:39:29,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:29,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237629190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:29,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:29,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237629191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:29,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:29,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237629193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:29,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:29,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237629194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:29,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:29,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237629194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:29,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0e49508d6d304f43b1c552c8ed41e09f 2024-12-15T04:39:29,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/55b206e3371d4c5c84e8b92e164846f5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/55b206e3371d4c5c84e8b92e164846f5 2024-12-15T04:39:29,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/55b206e3371d4c5c84e8b92e164846f5, entries=150, sequenceid=319, filesize=12.0 K 2024-12-15T04:39:29,310 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/825050e6cc104c569cd319fe44745d00 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/825050e6cc104c569cd319fe44745d00 2024-12-15T04:39:29,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/825050e6cc104c569cd319fe44745d00, entries=150, sequenceid=319, filesize=12.0 K 2024-12-15T04:39:29,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/0e49508d6d304f43b1c552c8ed41e09f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e49508d6d304f43b1c552c8ed41e09f 2024-12-15T04:39:29,316 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e49508d6d304f43b1c552c8ed41e09f, entries=150, sequenceid=319, filesize=12.0 K 2024-12-15T04:39:29,317 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 35d053ddd9bda1d702f0fa4863288c02 in 1242ms, sequenceid=319, compaction requested=true 2024-12-15T04:39:29,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:29,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:29,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:29,317 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:29,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:29,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:29,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:29,317 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:29,317 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:29,318 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:29,318 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor 
compaction (all files) 2024-12-15T04:39:29,318 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:29,318 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/5c8643fb1b4e4e9596cd59235c4f5285, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/be449ed8543048b2b8c40c29bcda7ffa, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/55b206e3371d4c5c84e8b92e164846f5] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.7 K 2024-12-15T04:39:29,318 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:29,319 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c8643fb1b4e4e9596cd59235c4f5285, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734237564210 2024-12-15T04:39:29,319 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor compaction (all files) 2024-12-15T04:39:29,319 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:29,319 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/75b9a43c624f4147948b55c260c20d45, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/306a323d8ca44b70ae0ae5cd646c56c1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/825050e6cc104c569cd319fe44745d00] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.7 K 2024-12-15T04:39:29,319 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting be449ed8543048b2b8c40c29bcda7ffa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734237565331 2024-12-15T04:39:29,319 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 75b9a43c624f4147948b55c260c20d45, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734237564210 2024-12-15T04:39:29,319 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55b206e3371d4c5c84e8b92e164846f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734237567465 2024-12-15T04:39:29,319 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 306a323d8ca44b70ae0ae5cd646c56c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734237565331 2024-12-15T04:39:29,320 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 825050e6cc104c569cd319fe44745d00, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734237567465 2024-12-15T04:39:29,325 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#273 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:29,325 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f003e5b20b554543b2cea97dc68a6e79 is 50, key is test_row_0/A:col10/1734237568075/Put/seqid=0 2024-12-15T04:39:29,327 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#274 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:29,327 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/800192ff7d4044caab6de2fee846f9f1 is 50, key is test_row_0/B:col10/1734237568075/Put/seqid=0 2024-12-15T04:39:29,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742150_1326 (size=13051) 2024-12-15T04:39:29,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742151_1327 (size=13051) 2024-12-15T04:39:29,741 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/800192ff7d4044caab6de2fee846f9f1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/800192ff7d4044caab6de2fee846f9f1 2024-12-15T04:39:29,741 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f003e5b20b554543b2cea97dc68a6e79 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f003e5b20b554543b2cea97dc68a6e79 2024-12-15T04:39:29,745 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into f003e5b20b554543b2cea97dc68a6e79(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:29,745 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:29,745 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=13, startTime=1734237569317; duration=0sec 2024-12-15T04:39:29,745 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:29,745 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:29,745 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:29,746 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into 800192ff7d4044caab6de2fee846f9f1(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:29,746 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:29,746 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=13, startTime=1734237569317; duration=0sec 2024-12-15T04:39:29,746 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:29,746 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:29,746 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:29,746 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:29,746 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:29,746 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/acc6ca55612644c1bacc14df7f884733, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e8d80737dc6340d4bf36943962ef4f38, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e49508d6d304f43b1c552c8ed41e09f] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.7 K 2024-12-15T04:39:29,747 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting acc6ca55612644c1bacc14df7f884733, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1734237564210 2024-12-15T04:39:29,747 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8d80737dc6340d4bf36943962ef4f38, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=290, earliestPutTs=1734237565331 2024-12-15T04:39:29,747 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e49508d6d304f43b1c552c8ed41e09f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734237567465 2024-12-15T04:39:29,753 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#C#compaction#275 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 
ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:29,753 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/d081433bc34a46429ed9eb16caa432f3 is 50, key is test_row_0/C:col10/1734237568075/Put/seqid=0 2024-12-15T04:39:29,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742152_1328 (size=13051) 2024-12-15T04:39:29,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-15T04:39:29,831 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-12-15T04:39:29,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-12-15T04:39:29,833 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-15T04:39:29,834 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:29,834 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:29,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-15T04:39:29,985 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:29,986 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-12-15T04:39:29,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:29,986 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:39:29,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:29,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:29,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:29,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:29,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:29,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:29,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/b4b00c7469e1468e8e5a21175bcef921 is 50, key is test_row_0/A:col10/1734237568080/Put/seqid=0 2024-12-15T04:39:29,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742153_1329 (size=12301) 2024-12-15T04:39:29,995 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/b4b00c7469e1468e8e5a21175bcef921 2024-12-15T04:39:30,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8bbc0076e4704306aa7809fcb388e501 is 50, key is test_row_0/B:col10/1734237568080/Put/seqid=0 2024-12-15T04:39:30,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742154_1330 (size=12301) 2024-12-15T04:39:30,003 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=332 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8bbc0076e4704306aa7809fcb388e501 2024-12-15T04:39:30,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/cb422bf94aba43729f772e48767a5ead is 50, key is test_row_0/C:col10/1734237568080/Put/seqid=0 2024-12-15T04:39:30,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742155_1331 (size=12301) 2024-12-15T04:39:30,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-15T04:39:30,164 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/d081433bc34a46429ed9eb16caa432f3 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d081433bc34a46429ed9eb16caa432f3 2024-12-15T04:39:30,168 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into d081433bc34a46429ed9eb16caa432f3(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:30,168 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:30,168 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=13, startTime=1734237569317; duration=0sec 2024-12-15T04:39:30,168 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:30,168 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:30,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:30,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:30,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237630207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,209 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237630207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237630207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237630207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237630208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237630310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237630310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237630310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237630310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237630311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,417 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/cb422bf94aba43729f772e48767a5ead 2024-12-15T04:39:30,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/b4b00c7469e1468e8e5a21175bcef921 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b4b00c7469e1468e8e5a21175bcef921 2024-12-15T04:39:30,423 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b4b00c7469e1468e8e5a21175bcef921, entries=150, sequenceid=332, filesize=12.0 K 2024-12-15T04:39:30,424 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/8bbc0076e4704306aa7809fcb388e501 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8bbc0076e4704306aa7809fcb388e501 2024-12-15T04:39:30,427 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8bbc0076e4704306aa7809fcb388e501, entries=150, sequenceid=332, filesize=12.0 K 2024-12-15T04:39:30,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/cb422bf94aba43729f772e48767a5ead as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/cb422bf94aba43729f772e48767a5ead 2024-12-15T04:39:30,431 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/cb422bf94aba43729f772e48767a5ead, entries=150, sequenceid=332, filesize=12.0 K 2024-12-15T04:39:30,431 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 35d053ddd9bda1d702f0fa4863288c02 in 445ms, sequenceid=332, compaction requested=false 2024-12-15T04:39:30,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:30,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:30,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-12-15T04:39:30,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-12-15T04:39:30,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-12-15T04:39:30,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 599 msec 2024-12-15T04:39:30,435 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 602 msec 2024-12-15T04:39:30,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-12-15T04:39:30,435 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-12-15T04:39:30,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-12-15T04:39:30,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-15T04:39:30,438 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:30,439 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:30,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:30,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:30,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-12-15T04:39:30,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:30,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:30,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:30,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:30,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:30,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:30,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237630517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f665c1d8aec84a618eeca5aba03385a6 is 50, key is test_row_0/A:col10/1734237570514/Put/seqid=0 2024-12-15T04:39:30,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237630518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237630518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237630519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237630519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742156_1332 (size=12301) 2024-12-15T04:39:30,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-15T04:39:30,540 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T04:39:30,593 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:30,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:30,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:30,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:30,594 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237630620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237630621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237630621, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237630622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237630622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-15T04:39:30,745 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:30,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:30,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:30,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:30,746 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237630821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237630824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237630824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237630824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:30,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237630825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,898 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:30,898 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:30,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:30,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:30,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:30,898 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:30,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f665c1d8aec84a618eeca5aba03385a6 2024-12-15T04:39:30,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/b5a4fbc3ee2c427e969c2edac9d100fc is 50, key is test_row_0/B:col10/1734237570514/Put/seqid=0 2024-12-15T04:39:30,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742157_1333 (size=12301) 2024-12-15T04:39:31,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-15T04:39:31,050 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:31,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
as already flushing 2024-12-15T04:39:31,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237631125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237631125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237631125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237631126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237631127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:31,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:31,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,203 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,333 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/b5a4fbc3ee2c427e969c2edac9d100fc 2024-12-15T04:39:31,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8d536c8e7d4c4edd89fff0a17305c19f is 50, key is test_row_0/C:col10/1734237570514/Put/seqid=0 2024-12-15T04:39:31,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742158_1334 (size=12301) 2024-12-15T04:39:31,354 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:31,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
as already flushing 2024-12-15T04:39:31,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,507 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,507 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:31,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:31,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,507 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-15T04:39:31,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49466 deadline: 1734237631627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,630 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49516 deadline: 1734237631629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49528 deadline: 1734237631630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49544 deadline: 1734237631630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:31,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49478 deadline: 1734237631630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,659 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,659 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:31,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:31,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,659 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:31,741 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=362 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8d536c8e7d4c4edd89fff0a17305c19f 2024-12-15T04:39:31,745 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/f665c1d8aec84a618eeca5aba03385a6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f665c1d8aec84a618eeca5aba03385a6 2024-12-15T04:39:31,748 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f665c1d8aec84a618eeca5aba03385a6, entries=150, sequenceid=362, filesize=12.0 K 2024-12-15T04:39:31,749 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/b5a4fbc3ee2c427e969c2edac9d100fc as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/b5a4fbc3ee2c427e969c2edac9d100fc 2024-12-15T04:39:31,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/b5a4fbc3ee2c427e969c2edac9d100fc, entries=150, 
sequenceid=362, filesize=12.0 K 2024-12-15T04:39:31,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/8d536c8e7d4c4edd89fff0a17305c19f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8d536c8e7d4c4edd89fff0a17305c19f 2024-12-15T04:39:31,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8d536c8e7d4c4edd89fff0a17305c19f, entries=150, sequenceid=362, filesize=12.0 K 2024-12-15T04:39:31,756 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=33.54 KB/34350 for 35d053ddd9bda1d702f0fa4863288c02 in 1242ms, sequenceid=362, compaction requested=true 2024-12-15T04:39:31,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:31,756 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:31,757 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 35d053ddd9bda1d702f0fa4863288c02:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:31,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:31,757 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:31,757 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/A is initiating minor compaction (all files) 2024-12-15T04:39:31,757 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 
37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:31,757 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/A in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,757 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/B is initiating minor compaction (all files) 2024-12-15T04:39:31,757 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/B in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:31,757 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f003e5b20b554543b2cea97dc68a6e79, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b4b00c7469e1468e8e5a21175bcef921, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f665c1d8aec84a618eeca5aba03385a6] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.8 K 2024-12-15T04:39:31,758 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/800192ff7d4044caab6de2fee846f9f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8bbc0076e4704306aa7809fcb388e501, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/b5a4fbc3ee2c427e969c2edac9d100fc] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.8 K 2024-12-15T04:39:31,758 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f003e5b20b554543b2cea97dc68a6e79, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734237567465 2024-12-15T04:39:31,758 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 800192ff7d4044caab6de2fee846f9f1, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734237567465 2024-12-15T04:39:31,758 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4b00c7469e1468e8e5a21175bcef921, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734237568078 2024-12-15T04:39:31,758 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bbc0076e4704306aa7809fcb388e501, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734237568078 2024-12-15T04:39:31,758 DEBUG 
[RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting b5a4fbc3ee2c427e969c2edac9d100fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1734237570513 2024-12-15T04:39:31,758 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f665c1d8aec84a618eeca5aba03385a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1734237570513 2024-12-15T04:39:31,764 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#A#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:31,765 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/09d27f3e9d0242358fea18f76d0b0e5a is 50, key is test_row_0/A:col10/1734237570514/Put/seqid=0 2024-12-15T04:39:31,766 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#B#compaction#283 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:31,766 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/a9285ea92094456693e8ac986b0090ad is 50, key is test_row_0/B:col10/1734237570514/Put/seqid=0 2024-12-15T04:39:31,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742159_1335 (size=13153) 2024-12-15T04:39:31,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742160_1336 (size=13153) 2024-12-15T04:39:31,811 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:31,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-12-15T04:39:31,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:31,812 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-15T04:39:31,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:31,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:31,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:31,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:31,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:31,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:31,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/709528a7663945769f929b837ddc0572 is 50, key is test_row_0/A:col10/1734237570518/Put/seqid=0 2024-12-15T04:39:31,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742161_1337 (size=12301) 2024-12-15T04:39:31,975 DEBUG [Thread-1208 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:55935 2024-12-15T04:39:31,975 DEBUG [Thread-1208 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:31,975 DEBUG [Thread-1206 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x68ad882f to 127.0.0.1:55935 2024-12-15T04:39:31,975 DEBUG [Thread-1206 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:31,976 DEBUG [Thread-1200 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:55935 2024-12-15T04:39:31,976 DEBUG [Thread-1200 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:31,978 DEBUG [Thread-1202 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1730a60f to 127.0.0.1:55935 2024-12-15T04:39:31,978 DEBUG [Thread-1204 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x598cfed4 to 127.0.0.1:55935 2024-12-15T04:39:31,978 DEBUG [Thread-1204 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:31,978 DEBUG [Thread-1202 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:32,201 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/09d27f3e9d0242358fea18f76d0b0e5a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/09d27f3e9d0242358fea18f76d0b0e5a 2024-12-15T04:39:32,201 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/a9285ea92094456693e8ac986b0090ad as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/a9285ea92094456693e8ac986b0090ad 2024-12-15T04:39:32,206 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/B of 35d053ddd9bda1d702f0fa4863288c02 into a9285ea92094456693e8ac986b0090ad(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:32,206 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/A of 35d053ddd9bda1d702f0fa4863288c02 into 09d27f3e9d0242358fea18f76d0b0e5a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:32,206 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:32,206 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:32,206 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/B, priority=13, startTime=1734237571757; duration=0sec 2024-12-15T04:39:32,206 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/A, priority=13, startTime=1734237571756; duration=0sec 2024-12-15T04:39:32,206 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:32,206 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:B 2024-12-15T04:39:32,206 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:32,206 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:A 2024-12-15T04:39:32,206 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-12-15T04:39:32,207 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:32,207 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 35d053ddd9bda1d702f0fa4863288c02/C is initiating minor compaction (all files) 2024-12-15T04:39:32,208 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 35d053ddd9bda1d702f0fa4863288c02/C in TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:32,208 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d081433bc34a46429ed9eb16caa432f3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/cb422bf94aba43729f772e48767a5ead, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8d536c8e7d4c4edd89fff0a17305c19f] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp, totalSize=36.8 K 2024-12-15T04:39:32,208 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d081433bc34a46429ed9eb16caa432f3, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1734237567465 2024-12-15T04:39:32,208 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting cb422bf94aba43729f772e48767a5ead, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1734237568078 2024-12-15T04:39:32,209 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d536c8e7d4c4edd89fff0a17305c19f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=362, earliestPutTs=1734237570513 2024-12-15T04:39:32,216 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 35d053ddd9bda1d702f0fa4863288c02#C#compaction#285 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:32,216 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/bdb6e4039dae48bb904fd1c72f456967 is 50, key is test_row_0/C:col10/1734237570514/Put/seqid=0 2024-12-15T04:39:32,220 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/709528a7663945769f929b837ddc0572 2024-12-15T04:39:32,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742162_1338 (size=13153) 2024-12-15T04:39:32,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/79c8a22328cc4092af3d9be3800134af is 50, key is test_row_0/B:col10/1734237570518/Put/seqid=0 2024-12-15T04:39:32,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742163_1339 (size=12301) 2024-12-15T04:39:32,230 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/79c8a22328cc4092af3d9be3800134af 2024-12-15T04:39:32,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/78daadfdb2104137babdc601d2ed5fb1 is 50, key is test_row_0/C:col10/1734237570518/Put/seqid=0 2024-12-15T04:39:32,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742164_1340 (size=12301) 2024-12-15T04:39:32,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-15T04:39:32,632 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/bdb6e4039dae48bb904fd1c72f456967 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/bdb6e4039dae48bb904fd1c72f456967 2024-12-15T04:39:32,637 DEBUG [Thread-1197 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x131ceb8f to 127.0.0.1:55935 2024-12-15T04:39:32,637 DEBUG [Thread-1197 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-12-15T04:39:32,637 DEBUG [Thread-1193 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:55935 2024-12-15T04:39:32,637 DEBUG [Thread-1193 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:32,638 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=370 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/78daadfdb2104137babdc601d2ed5fb1 2024-12-15T04:39:32,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:32,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. as already flushing 2024-12-15T04:39:32,639 DEBUG [Thread-1195 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66e575aa to 127.0.0.1:55935 2024-12-15T04:39:32,639 DEBUG [Thread-1195 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:32,640 DEBUG [Thread-1189 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:55935 2024-12-15T04:39:32,640 DEBUG [Thread-1189 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:32,641 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 35d053ddd9bda1d702f0fa4863288c02/C of 35d053ddd9bda1d702f0fa4863288c02 into bdb6e4039dae48bb904fd1c72f456967(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:32,641 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:32,641 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02., storeName=35d053ddd9bda1d702f0fa4863288c02/C, priority=13, startTime=1734237571757; duration=0sec 2024-12-15T04:39:32,641 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:32,641 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 35d053ddd9bda1d702f0fa4863288c02:C 2024-12-15T04:39:32,641 DEBUG [Thread-1191 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:55935 2024-12-15T04:39:32,641 DEBUG [Thread-1191 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:32,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/709528a7663945769f929b837ddc0572 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/709528a7663945769f929b837ddc0572 2024-12-15T04:39:32,646 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/709528a7663945769f929b837ddc0572, entries=150, sequenceid=370, filesize=12.0 K 2024-12-15T04:39:32,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/79c8a22328cc4092af3d9be3800134af as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/79c8a22328cc4092af3d9be3800134af 2024-12-15T04:39:32,649 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/79c8a22328cc4092af3d9be3800134af, entries=150, sequenceid=370, filesize=12.0 K 2024-12-15T04:39:32,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/78daadfdb2104137babdc601d2ed5fb1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/78daadfdb2104137babdc601d2ed5fb1 
2024-12-15T04:39:32,653 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/78daadfdb2104137babdc601d2ed5fb1, entries=150, sequenceid=370, filesize=12.0 K 2024-12-15T04:39:32,653 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=33.54 KB/34350 for 35d053ddd9bda1d702f0fa4863288c02 in 841ms, sequenceid=370, compaction requested=false 2024-12-15T04:39:32,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:32,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:32,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-12-15T04:39:32,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-12-15T04:39:32,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-12-15T04:39:32,655 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2150 sec 2024-12-15T04:39:32,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.2190 sec 2024-12-15T04:39:34,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-12-15T04:39:34,544 INFO [Thread-1199 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
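The FLUSH procedure (procId 85) completing above was requested through the client Admin API, and the entries that follow show the same client beginning to disable the table. Below is a minimal sketch of those two client calls, assuming a standard HBase Connection; only the table name is taken from the log, the rest is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenDisableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);          // blocks until the FlushTableProcedure finishes (procId 85 above)
      admin.disableTable(table);   // triggers the DisableTableProcedure seen next in the log (pid=87)
      // The master polls region close progress; the client can confirm the end state:
      boolean disabled = admin.isTableDisabled(table);
      System.out.println("TestAcidGuarantees disabled: " + disabled);
    }
  }
}

Disabling unassigns every region of the table, which is why the log continues with TransitRegionStateProcedure/CloseRegionProcedure entries and a final memstore flush before the region is closed.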
Writers: 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8796 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8892 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8394 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8793 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8900 2024-12-15T04:39:34,545 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-15T04:39:34,545 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T04:39:34,546 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:55935 2024-12-15T04:39:34,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:39:34,547 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-15T04:39:34,548 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-15T04:39:34,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:34,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-15T04:39:34,554 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237574554"}]},"ts":"1734237574554"} 2024-12-15T04:39:34,555 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-15T04:39:34,596 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-15T04:39:34,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:39:34,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=35d053ddd9bda1d702f0fa4863288c02, UNASSIGN}] 2024-12-15T04:39:34,600 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=89, ppid=88, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=35d053ddd9bda1d702f0fa4863288c02, UNASSIGN 2024-12-15T04:39:34,601 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=89 updating hbase:meta row=35d053ddd9bda1d702f0fa4863288c02, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:34,604 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:39:34,604 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; CloseRegionProcedure 35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:39:34,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-15T04:39:34,756 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:34,757 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(124): Close 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:34,757 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:39:34,758 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1681): Closing 35d053ddd9bda1d702f0fa4863288c02, disabling compactions & flushes 2024-12-15T04:39:34,758 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:34,758 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 2024-12-15T04:39:34,758 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. after waiting 0 ms 2024-12-15T04:39:34,758 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:34,758 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(2837): Flushing 35d053ddd9bda1d702f0fa4863288c02 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-15T04:39:34,758 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=A 2024-12-15T04:39:34,759 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:34,759 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=B 2024-12-15T04:39:34,759 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:34,759 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 35d053ddd9bda1d702f0fa4863288c02, store=C 2024-12-15T04:39:34,759 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:34,767 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ada63dc83708476d8d0c978b94b304e6 is 50, key is test_row_0/A:col10/1734237572640/Put/seqid=0 2024-12-15T04:39:34,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742165_1341 (size=12301) 2024-12-15T04:39:34,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-15T04:39:35,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-15T04:39:35,171 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ada63dc83708476d8d0c978b94b304e6 2024-12-15T04:39:35,183 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/a9fee34609d64a9bb1b98f2ef45c7fdf is 50, key is test_row_0/B:col10/1734237572640/Put/seqid=0 2024-12-15T04:39:35,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742166_1342 (size=12301) 2024-12-15T04:39:35,593 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 
{event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/a9fee34609d64a9bb1b98f2ef45c7fdf 2024-12-15T04:39:35,606 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/6ee6a1c45e994f33a8fecc4382de1642 is 50, key is test_row_0/C:col10/1734237572640/Put/seqid=0 2024-12-15T04:39:35,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742167_1343 (size=12301) 2024-12-15T04:39:35,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-15T04:39:36,012 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/6ee6a1c45e994f33a8fecc4382de1642 2024-12-15T04:39:36,021 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/A/ada63dc83708476d8d0c978b94b304e6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ada63dc83708476d8d0c978b94b304e6 2024-12-15T04:39:36,026 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ada63dc83708476d8d0c978b94b304e6, entries=150, sequenceid=381, filesize=12.0 K 2024-12-15T04:39:36,027 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/B/a9fee34609d64a9bb1b98f2ef45c7fdf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/a9fee34609d64a9bb1b98f2ef45c7fdf 2024-12-15T04:39:36,031 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/a9fee34609d64a9bb1b98f2ef45c7fdf, entries=150, sequenceid=381, filesize=12.0 K 2024-12-15T04:39:36,032 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/.tmp/C/6ee6a1c45e994f33a8fecc4382de1642 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/6ee6a1c45e994f33a8fecc4382de1642 2024-12-15T04:39:36,037 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/6ee6a1c45e994f33a8fecc4382de1642, entries=150, sequenceid=381, filesize=12.0 K 2024-12-15T04:39:36,038 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 35d053ddd9bda1d702f0fa4863288c02 in 1280ms, sequenceid=381, compaction requested=true 2024-12-15T04:39:36,038 DEBUG [StoreCloser-TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ba7c9248e681436780f1735545c7d41e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1d58181acfb949d696c1d0cb13b269d1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b1fe62e2822b469ca648599e5e4b5ef4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0513b2f6c7964a7ea13637639d268e73, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/789b4514c66a472d838c84f451bf32b8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f01ee7d53864474cb944fd32c4d912fc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f67d91098c1a4154a030a119c7bb3006, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/39519790197d42e49dece1c259b34ea0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f10853df19204824ac28b02ed8bfbf8d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/772f37081fd74e60b654863977b4856d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/14ad8a9e44f24c559d49c157a54dd8e3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/a115a8306a4847a09b25e4953a355e1e, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/494c49b6b0d74862a0b037d94c97fbb6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0133f167435648e1b80705964ce06bfc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1521474aa25b4731b70138a0dd0be0d6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ab9bb3552cf54143bb4ef0af94d2a02b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/7b4c5b05df3443e3abc4dc9759b6e758, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/2504bc65377d4a35ab784abf0435dff8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/5c8643fb1b4e4e9596cd59235c4f5285, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/19a5614622364abb93ca9d5139d35c50, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/be449ed8543048b2b8c40c29bcda7ffa, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f003e5b20b554543b2cea97dc68a6e79, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/55b206e3371d4c5c84e8b92e164846f5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b4b00c7469e1468e8e5a21175bcef921, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f665c1d8aec84a618eeca5aba03385a6] to archive 2024-12-15T04:39:36,039 DEBUG [StoreCloser-TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
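The StoreCloser entries above and below show each compacted store file being relocated from the table's data directory to the mirrored path under archive/. The snippet below is a simplified, illustrative sketch of that kind of move using the plain Hadoop FileSystem API; it is not HBase's HFileArchiver implementation and the class name is made up, but the source and target paths are copied from the first archived A-family file in the log.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:35921"), conf);
    // Relative store-file path under data/ and archive/ (taken from the log entry below).
    String relative = "default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ba7c9248e681436780f1735545c7d41e";
    Path source = new Path("/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/" + relative);
    Path target = new Path("/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/" + relative);
    fs.mkdirs(target.getParent());      // ensure the mirrored archive directory exists
    if (!fs.rename(source, target)) {   // within one HDFS namespace, rename is a metadata-only move
      throw new IOException("could not archive " + source);
    }
  }
}

Because the archive tree mirrors the data tree, the relative path is preserved, which matches the "Archived from FileableStoreFile, ... to ..." pairs logged for every file below.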
2024-12-15T04:39:36,041 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ba7c9248e681436780f1735545c7d41e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ba7c9248e681436780f1735545c7d41e 2024-12-15T04:39:36,041 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b1fe62e2822b469ca648599e5e4b5ef4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b1fe62e2822b469ca648599e5e4b5ef4 2024-12-15T04:39:36,042 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/789b4514c66a472d838c84f451bf32b8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/789b4514c66a472d838c84f451bf32b8 2024-12-15T04:39:36,042 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1d58181acfb949d696c1d0cb13b269d1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1d58181acfb949d696c1d0cb13b269d1 2024-12-15T04:39:36,042 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f01ee7d53864474cb944fd32c4d912fc to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f01ee7d53864474cb944fd32c4d912fc 2024-12-15T04:39:36,042 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f67d91098c1a4154a030a119c7bb3006 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f67d91098c1a4154a030a119c7bb3006 2024-12-15T04:39:36,042 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0513b2f6c7964a7ea13637639d268e73 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0513b2f6c7964a7ea13637639d268e73 2024-12-15T04:39:36,042 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/39519790197d42e49dece1c259b34ea0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/39519790197d42e49dece1c259b34ea0 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f10853df19204824ac28b02ed8bfbf8d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f10853df19204824ac28b02ed8bfbf8d 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/a115a8306a4847a09b25e4953a355e1e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/a115a8306a4847a09b25e4953a355e1e 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/772f37081fd74e60b654863977b4856d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/772f37081fd74e60b654863977b4856d 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0133f167435648e1b80705964ce06bfc to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/0133f167435648e1b80705964ce06bfc 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/14ad8a9e44f24c559d49c157a54dd8e3 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/14ad8a9e44f24c559d49c157a54dd8e3 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/494c49b6b0d74862a0b037d94c97fbb6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/494c49b6b0d74862a0b037d94c97fbb6 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1521474aa25b4731b70138a0dd0be0d6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/1521474aa25b4731b70138a0dd0be0d6 2024-12-15T04:39:36,044 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ab9bb3552cf54143bb4ef0af94d2a02b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ab9bb3552cf54143bb4ef0af94d2a02b 2024-12-15T04:39:36,045 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/7b4c5b05df3443e3abc4dc9759b6e758 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/7b4c5b05df3443e3abc4dc9759b6e758 2024-12-15T04:39:36,046 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/5c8643fb1b4e4e9596cd59235c4f5285 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/5c8643fb1b4e4e9596cd59235c4f5285 2024-12-15T04:39:36,046 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/be449ed8543048b2b8c40c29bcda7ffa to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/be449ed8543048b2b8c40c29bcda7ffa 2024-12-15T04:39:36,046 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f003e5b20b554543b2cea97dc68a6e79 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f003e5b20b554543b2cea97dc68a6e79 2024-12-15T04:39:36,046 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/19a5614622364abb93ca9d5139d35c50 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/19a5614622364abb93ca9d5139d35c50 2024-12-15T04:39:36,046 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/2504bc65377d4a35ab784abf0435dff8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/2504bc65377d4a35ab784abf0435dff8 2024-12-15T04:39:36,046 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/55b206e3371d4c5c84e8b92e164846f5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/55b206e3371d4c5c84e8b92e164846f5 2024-12-15T04:39:36,046 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b4b00c7469e1468e8e5a21175bcef921 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/b4b00c7469e1468e8e5a21175bcef921 2024-12-15T04:39:36,047 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f665c1d8aec84a618eeca5aba03385a6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/f665c1d8aec84a618eeca5aba03385a6 2024-12-15T04:39:36,048 DEBUG [StoreCloser-TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d8f26fb5f1e04e7bafc07b330eeb2dd6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/302c50c86e774b26a050cb17816979fe, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/2ad5322f9e2642468fc57e207f24ea78, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8d8dcb296b4349d2af6de3002e57d19d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/55108c236a864db2b5352a8b87bf6cbf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c7220c9ccdd844659a25915ba7fc628b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/1abfa815e55548d988a14918da2908d9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/06e72db66d3d4f198db0d1f9a88f1fd3, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/876810db090c4c689c2a5d1717ce5640, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/adecd13f5a044b0696e385c1ded72957, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c9dfc561c1e14f1da16c1da47b6f1b07, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8018c73d683c43b29d5d6ed0f417adaf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d6a640e6e1d14dc8a0e53ee44e3296c1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/6fbb9d4539c047478dfaafdbe7938712, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/cc918cfc8db54efc8c2868631653b4b7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/5dbab4b7afd94d11843d9f46c6471ecd, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/103882d1f43c4bcc8a5a3dcbfdfa566a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/f3d9599ce8984889a4eb48069bd067e8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/75b9a43c624f4147948b55c260c20d45, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/25c77a7565c54b71b66b3edc9dfb8fd1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/306a323d8ca44b70ae0ae5cd646c56c1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/800192ff7d4044caab6de2fee846f9f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/825050e6cc104c569cd319fe44745d00, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8bbc0076e4704306aa7809fcb388e501, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/b5a4fbc3ee2c427e969c2edac9d100fc] to archive 2024-12-15T04:39:36,049 DEBUG [StoreCloser-TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:39:36,050 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/302c50c86e774b26a050cb17816979fe to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/302c50c86e774b26a050cb17816979fe 2024-12-15T04:39:36,050 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d8f26fb5f1e04e7bafc07b330eeb2dd6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d8f26fb5f1e04e7bafc07b330eeb2dd6 2024-12-15T04:39:36,051 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/2ad5322f9e2642468fc57e207f24ea78 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/2ad5322f9e2642468fc57e207f24ea78 2024-12-15T04:39:36,051 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/55108c236a864db2b5352a8b87bf6cbf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/55108c236a864db2b5352a8b87bf6cbf 2024-12-15T04:39:36,051 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8d8dcb296b4349d2af6de3002e57d19d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8d8dcb296b4349d2af6de3002e57d19d 2024-12-15T04:39:36,051 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/06e72db66d3d4f198db0d1f9a88f1fd3 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/06e72db66d3d4f198db0d1f9a88f1fd3 2024-12-15T04:39:36,051 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/1abfa815e55548d988a14918da2908d9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/1abfa815e55548d988a14918da2908d9 2024-12-15T04:39:36,051 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c7220c9ccdd844659a25915ba7fc628b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c7220c9ccdd844659a25915ba7fc628b 2024-12-15T04:39:36,052 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/876810db090c4c689c2a5d1717ce5640 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/876810db090c4c689c2a5d1717ce5640 2024-12-15T04:39:36,052 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/adecd13f5a044b0696e385c1ded72957 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/adecd13f5a044b0696e385c1ded72957 2024-12-15T04:39:36,052 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c9dfc561c1e14f1da16c1da47b6f1b07 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/c9dfc561c1e14f1da16c1da47b6f1b07 2024-12-15T04:39:36,052 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8018c73d683c43b29d5d6ed0f417adaf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8018c73d683c43b29d5d6ed0f417adaf 2024-12-15T04:39:36,052 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/6fbb9d4539c047478dfaafdbe7938712 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/6fbb9d4539c047478dfaafdbe7938712 2024-12-15T04:39:36,053 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/5dbab4b7afd94d11843d9f46c6471ecd to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/5dbab4b7afd94d11843d9f46c6471ecd 2024-12-15T04:39:36,053 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d6a640e6e1d14dc8a0e53ee44e3296c1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/d6a640e6e1d14dc8a0e53ee44e3296c1 2024-12-15T04:39:36,053 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/cc918cfc8db54efc8c2868631653b4b7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/cc918cfc8db54efc8c2868631653b4b7 2024-12-15T04:39:36,053 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/103882d1f43c4bcc8a5a3dcbfdfa566a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/103882d1f43c4bcc8a5a3dcbfdfa566a 2024-12-15T04:39:36,053 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/75b9a43c624f4147948b55c260c20d45 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/75b9a43c624f4147948b55c260c20d45 2024-12-15T04:39:36,054 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/25c77a7565c54b71b66b3edc9dfb8fd1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/25c77a7565c54b71b66b3edc9dfb8fd1 2024-12-15T04:39:36,054 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/f3d9599ce8984889a4eb48069bd067e8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/f3d9599ce8984889a4eb48069bd067e8 2024-12-15T04:39:36,054 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/306a323d8ca44b70ae0ae5cd646c56c1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/306a323d8ca44b70ae0ae5cd646c56c1 2024-12-15T04:39:36,054 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/825050e6cc104c569cd319fe44745d00 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/825050e6cc104c569cd319fe44745d00 2024-12-15T04:39:36,054 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/800192ff7d4044caab6de2fee846f9f1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/800192ff7d4044caab6de2fee846f9f1 2024-12-15T04:39:36,054 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8bbc0076e4704306aa7809fcb388e501 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/8bbc0076e4704306aa7809fcb388e501 2024-12-15T04:39:36,054 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/b5a4fbc3ee2c427e969c2edac9d100fc to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/b5a4fbc3ee2c427e969c2edac9d100fc 2024-12-15T04:39:36,055 DEBUG [StoreCloser-TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0c617e09cf6543c894dd61f5bba30b30, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/39c124cff36a4c5b9a1f026cff8efdb6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/275eaa3f5ca04c5f86dcc9b394025424, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/52bb44e3047b404b99b37bd31c9c0609, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/a92449404cf74a7fb694f7d0a24c7ee4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ff4dd7e0f5ae45ad9457288bcbf9d150, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/13cd456cd240479f94cb1691dc91b5ed, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/15883e7afae24e6187af97f5d1374e2d, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d43fefac83634900807ba422a8b377a7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/5d9cc14312fe42308b056378aa49cc09, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/2fb53dbc6c1c43b7b67a3b219586ec36, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ccc0c76ac6274f0284596fa5763b211b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/9c7a557cfe764654aa578fcbe5f19318, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ab02d1ce0abc4af9be09cc6d0892ebed, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8243248267b54bd58186c9494f784564, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e11571deb1c4ea3a9186ad8d0bce206, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8ddf7443c5e945e9b40983708be741cd, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0dac446ff3dd4d20a614b6067cd58f7c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/acc6ca55612644c1bacc14df7f884733, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e836ac86903c4ae2849834e8b4693a80, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e8d80737dc6340d4bf36943962ef4f38, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d081433bc34a46429ed9eb16caa432f3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e49508d6d304f43b1c552c8ed41e09f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/cb422bf94aba43729f772e48767a5ead, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8d536c8e7d4c4edd89fff0a17305c19f] to archive 2024-12-15T04:39:36,056 DEBUG [StoreCloser-TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:39:36,057 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/275eaa3f5ca04c5f86dcc9b394025424 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/275eaa3f5ca04c5f86dcc9b394025424 2024-12-15T04:39:36,058 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0c617e09cf6543c894dd61f5bba30b30 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0c617e09cf6543c894dd61f5bba30b30 2024-12-15T04:39:36,058 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/52bb44e3047b404b99b37bd31c9c0609 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/52bb44e3047b404b99b37bd31c9c0609 2024-12-15T04:39:36,058 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/15883e7afae24e6187af97f5d1374e2d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/15883e7afae24e6187af97f5d1374e2d 2024-12-15T04:39:36,058 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/13cd456cd240479f94cb1691dc91b5ed to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/13cd456cd240479f94cb1691dc91b5ed 2024-12-15T04:39:36,058 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/39c124cff36a4c5b9a1f026cff8efdb6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/39c124cff36a4c5b9a1f026cff8efdb6 2024-12-15T04:39:36,058 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/a92449404cf74a7fb694f7d0a24c7ee4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/a92449404cf74a7fb694f7d0a24c7ee4 2024-12-15T04:39:36,058 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ff4dd7e0f5ae45ad9457288bcbf9d150 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ff4dd7e0f5ae45ad9457288bcbf9d150 2024-12-15T04:39:36,059 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ccc0c76ac6274f0284596fa5763b211b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ccc0c76ac6274f0284596fa5763b211b 2024-12-15T04:39:36,059 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d43fefac83634900807ba422a8b377a7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d43fefac83634900807ba422a8b377a7 2024-12-15T04:39:36,059 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/9c7a557cfe764654aa578fcbe5f19318 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/9c7a557cfe764654aa578fcbe5f19318 2024-12-15T04:39:36,059 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ab02d1ce0abc4af9be09cc6d0892ebed to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/ab02d1ce0abc4af9be09cc6d0892ebed 2024-12-15T04:39:36,060 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/5d9cc14312fe42308b056378aa49cc09 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/5d9cc14312fe42308b056378aa49cc09 2024-12-15T04:39:36,060 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/2fb53dbc6c1c43b7b67a3b219586ec36 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/2fb53dbc6c1c43b7b67a3b219586ec36 2024-12-15T04:39:36,060 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e11571deb1c4ea3a9186ad8d0bce206 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e11571deb1c4ea3a9186ad8d0bce206 2024-12-15T04:39:36,060 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8243248267b54bd58186c9494f784564 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8243248267b54bd58186c9494f784564 2024-12-15T04:39:36,061 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e836ac86903c4ae2849834e8b4693a80 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e836ac86903c4ae2849834e8b4693a80 2024-12-15T04:39:36,061 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8ddf7443c5e945e9b40983708be741cd to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8ddf7443c5e945e9b40983708be741cd 2024-12-15T04:39:36,061 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/acc6ca55612644c1bacc14df7f884733 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/acc6ca55612644c1bacc14df7f884733 2024-12-15T04:39:36,061 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0dac446ff3dd4d20a614b6067cd58f7c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0dac446ff3dd4d20a614b6067cd58f7c 2024-12-15T04:39:36,061 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e8d80737dc6340d4bf36943962ef4f38 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/e8d80737dc6340d4bf36943962ef4f38 2024-12-15T04:39:36,061 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d081433bc34a46429ed9eb16caa432f3 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/d081433bc34a46429ed9eb16caa432f3 2024-12-15T04:39:36,061 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/cb422bf94aba43729f772e48767a5ead to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/cb422bf94aba43729f772e48767a5ead 2024-12-15T04:39:36,062 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e49508d6d304f43b1c552c8ed41e09f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/0e49508d6d304f43b1c552c8ed41e09f 2024-12-15T04:39:36,062 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8d536c8e7d4c4edd89fff0a17305c19f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/8d536c8e7d4c4edd89fff0a17305c19f 2024-12-15T04:39:36,065 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/recovered.edits/384.seqid, newMaxSeqId=384, maxSeqId=1 2024-12-15T04:39:36,066 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02. 
2024-12-15T04:39:36,066 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] regionserver.HRegion(1635): Region close journal for 35d053ddd9bda1d702f0fa4863288c02: 2024-12-15T04:39:36,067 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=90}] handler.UnassignRegionHandler(170): Closed 35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:36,067 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=89 updating hbase:meta row=35d053ddd9bda1d702f0fa4863288c02, regionState=CLOSED 2024-12-15T04:39:36,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-15T04:39:36,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseRegionProcedure 35d053ddd9bda1d702f0fa4863288c02, server=e56de37b85b3,43199,1734237482035 in 1.4640 sec 2024-12-15T04:39:36,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=89, resume processing ppid=88 2024-12-15T04:39:36,070 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, ppid=88, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=35d053ddd9bda1d702f0fa4863288c02, UNASSIGN in 1.4700 sec 2024-12-15T04:39:36,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-12-15T04:39:36,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4730 sec 2024-12-15T04:39:36,071 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237576071"}]},"ts":"1734237576071"} 2024-12-15T04:39:36,072 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-15T04:39:36,103 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-15T04:39:36,105 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5560 sec 2024-12-15T04:39:36,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-12-15T04:39:36,661 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-12-15T04:39:36,663 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-15T04:39:36,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:36,666 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=91, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:36,668 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=91, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:36,668 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-15T04:39:36,671 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:36,676 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/recovered.edits] 2024-12-15T04:39:36,681 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ada63dc83708476d8d0c978b94b304e6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/ada63dc83708476d8d0c978b94b304e6 2024-12-15T04:39:36,681 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/09d27f3e9d0242358fea18f76d0b0e5a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/09d27f3e9d0242358fea18f76d0b0e5a 2024-12-15T04:39:36,681 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/709528a7663945769f929b837ddc0572 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/A/709528a7663945769f929b837ddc0572 2024-12-15T04:39:36,686 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/79c8a22328cc4092af3d9be3800134af to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/79c8a22328cc4092af3d9be3800134af 2024-12-15T04:39:36,686 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/a9fee34609d64a9bb1b98f2ef45c7fdf to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/a9fee34609d64a9bb1b98f2ef45c7fdf 2024-12-15T04:39:36,686 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/a9285ea92094456693e8ac986b0090ad to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/B/a9285ea92094456693e8ac986b0090ad 2024-12-15T04:39:36,690 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/6ee6a1c45e994f33a8fecc4382de1642 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/6ee6a1c45e994f33a8fecc4382de1642 2024-12-15T04:39:36,691 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/bdb6e4039dae48bb904fd1c72f456967 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/bdb6e4039dae48bb904fd1c72f456967 2024-12-15T04:39:36,691 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/78daadfdb2104137babdc601d2ed5fb1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/C/78daadfdb2104137babdc601d2ed5fb1 2024-12-15T04:39:36,694 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/recovered.edits/384.seqid to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02/recovered.edits/384.seqid 2024-12-15T04:39:36,695 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/35d053ddd9bda1d702f0fa4863288c02 2024-12-15T04:39:36,695 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-15T04:39:36,697 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=91, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:36,701 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-15T04:39:36,703 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 
2024-12-15T04:39:36,704 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=91, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:36,704 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-15T04:39:36,704 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734237576704"}]},"ts":"9223372036854775807"} 2024-12-15T04:39:36,706 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-15T04:39:36,706 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 35d053ddd9bda1d702f0fa4863288c02, NAME => 'TestAcidGuarantees,,1734237549492.35d053ddd9bda1d702f0fa4863288c02.', STARTKEY => '', ENDKEY => ''}] 2024-12-15T04:39:36,706 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-15T04:39:36,706 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734237576706"}]},"ts":"9223372036854775807"} 2024-12-15T04:39:36,708 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-15T04:39:36,753 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=91, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:36,755 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 91 msec 2024-12-15T04:39:36,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-12-15T04:39:36,770 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-12-15T04:39:36,784 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testGetAtomicity Thread=246 (was 244) - Thread LEAK? -, OpenFileDescriptor=457 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=322 (was 326), ProcessCount=11 (was 11), AvailableMemoryMB=4498 (was 4543) 2024-12-15T04:39:36,794 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=246, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=322, ProcessCount=11, AvailableMemoryMB=4497 2024-12-15T04:39:36,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-12-15T04:39:36,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:39:36,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:36,798 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T04:39:36,798 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:36,798 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 92 2024-12-15T04:39:36,799 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T04:39:36,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T04:39:36,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742168_1344 (size=963) 2024-12-15T04:39:36,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T04:39:37,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T04:39:37,211 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:39:37,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742169_1345 (size=53) 2024-12-15T04:39:37,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T04:39:37,622 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:39:37,622 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing bfdbd4565c936c59a93e348f03cec823, disabling compactions & flushes 2024-12-15T04:39:37,622 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:37,622 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:37,622 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. after waiting 0 ms 2024-12-15T04:39:37,622 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:37,622 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:37,622 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:37,624 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T04:39:37,625 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734237577624"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734237577624"}]},"ts":"1734237577624"} 2024-12-15T04:39:37,627 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T04:39:37,629 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T04:39:37,629 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237577629"}]},"ts":"1734237577629"} 2024-12-15T04:39:37,631 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-15T04:39:37,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, ASSIGN}] 2024-12-15T04:39:37,698 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, ASSIGN 2024-12-15T04:39:37,699 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=93, ppid=92, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, ASSIGN; state=OFFLINE, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=false 2024-12-15T04:39:37,850 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:37,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE; OpenRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:39:37,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T04:39:38,007 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:38,013 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:38,013 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7285): Opening region: {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:39:38,014 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,014 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:39:38,014 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7327): checking encryption for bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,014 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(7330): checking classloading for bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,016 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,018 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:38,018 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfdbd4565c936c59a93e348f03cec823 columnFamilyName A 2024-12-15T04:39:38,018 DEBUG [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:38,019 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(327): Store=bfdbd4565c936c59a93e348f03cec823/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:38,019 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,021 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:38,021 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfdbd4565c936c59a93e348f03cec823 columnFamilyName B 2024-12-15T04:39:38,021 DEBUG [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:38,021 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(327): Store=bfdbd4565c936c59a93e348f03cec823/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:38,022 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,023 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:38,023 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfdbd4565c936c59a93e348f03cec823 columnFamilyName C 2024-12-15T04:39:38,023 DEBUG [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:38,024 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(327): Store=bfdbd4565c936c59a93e348f03cec823/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:38,024 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:38,025 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,026 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,029 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:39:38,031 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1085): writing seq id for bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:38,034 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:39:38,035 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1102): Opened bfdbd4565c936c59a93e348f03cec823; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64196499, jitterRate=-0.04339762032032013}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:39:38,036 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegion(1001): Region open journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:38,037 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., pid=94, masterSystemTime=1734237578007 2024-12-15T04:39:38,038 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:38,038 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=94}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:38,039 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=93 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:38,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-12-15T04:39:38,040 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; OpenRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 in 186 msec 2024-12-15T04:39:38,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-15T04:39:38,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, ASSIGN in 344 msec 2024-12-15T04:39:38,042 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T04:39:38,042 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237578042"}]},"ts":"1734237578042"} 2024-12-15T04:39:38,043 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-15T04:39:38,054 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=92, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T04:39:38,055 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2580 sec 2024-12-15T04:39:38,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-15T04:39:38,910 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 92 completed 2024-12-15T04:39:38,913 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x117e86d9 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49e13594 2024-12-15T04:39:38,954 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd5b441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:38,957 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:38,959 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32776, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:38,961 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T04:39:38,962 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36908, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T04:39:38,964 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-15T04:39:38,964 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:39:38,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=95, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-15T04:39:38,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742170_1346 (size=999) 2024-12-15T04:39:39,379 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-15T04:39:39,379 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-15T04:39:39,384 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:39:39,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, REOPEN/MOVE}] 2024-12-15T04:39:39,388 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, REOPEN/MOVE 2024-12-15T04:39:39,389 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:39,390 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:39:39,390 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE; CloseRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:39:39,542 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:39,542 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(124): Close bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,543 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:39:39,543 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1681): Closing bfdbd4565c936c59a93e348f03cec823, disabling compactions & flushes 2024-12-15T04:39:39,543 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:39,543 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:39,543 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. after waiting 0 ms 2024-12-15T04:39:39,543 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:39,551 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-15T04:39:39,579 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:39,579 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegion(1635): Region close journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:39,579 WARN [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] regionserver.HRegionServer(3786): Not adding moved region record: bfdbd4565c936c59a93e348f03cec823 to self. 2024-12-15T04:39:39,582 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=98}] handler.UnassignRegionHandler(170): Closed bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,583 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=CLOSED 2024-12-15T04:39:39,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-12-15T04:39:39,589 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; CloseRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 in 196 msec 2024-12-15T04:39:39,590 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, REOPEN/MOVE; state=CLOSED, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=true 2024-12-15T04:39:39,741 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:39,743 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=97, state=RUNNABLE; OpenRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:39:39,896 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:39,902 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:39,902 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7285): Opening region: {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:39:39,903 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,903 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:39:39,903 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7327): checking encryption for bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,903 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7330): checking classloading for bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,905 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,905 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:39,906 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfdbd4565c936c59a93e348f03cec823 columnFamilyName A 2024-12-15T04:39:39,907 DEBUG [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:39,907 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(327): Store=bfdbd4565c936c59a93e348f03cec823/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:39,908 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,908 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:39,908 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfdbd4565c936c59a93e348f03cec823 columnFamilyName B 2024-12-15T04:39:39,909 DEBUG [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:39,909 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(327): Store=bfdbd4565c936c59a93e348f03cec823/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:39,909 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,910 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:39:39,910 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bfdbd4565c936c59a93e348f03cec823 columnFamilyName C 2024-12-15T04:39:39,910 DEBUG [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:39,910 INFO [StoreOpener-bfdbd4565c936c59a93e348f03cec823-1 {}] regionserver.HStore(327): Store=bfdbd4565c936c59a93e348f03cec823/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:39:39,910 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:39,911 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,912 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,914 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:39:39,915 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1085): writing seq id for bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:39,916 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1102): Opened bfdbd4565c936c59a93e348f03cec823; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71923611, jitterRate=0.07174532115459442}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:39:39,918 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1001): Region open journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:39,919 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., pid=99, masterSystemTime=1734237579896 2024-12-15T04:39:39,920 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:39,920 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
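The per-family settings echoed while the stores open above (CompactingMemStore with compactor=ADAPTIVE, and MOB storage for the values that later land under mobdir/) correspond roughly to a column-family descriptor like the following sketch; the MOB threshold value is an assumption, not something read from the log:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class FamilyAExample {
  public static void main(String[] args) {
    // Column family "A" roughly as the opened store reports it: in-memory
    // compaction enabled (ADAPTIVE policy) and MOB enabled so large cells are
    // written under mobdir/ instead of the regular store files.
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .setMobEnabled(true)
        .setMobThreshold(4 * 1024)   // illustrative threshold, not taken from the log
        .build();
    System.out.println(familyA);
  }
}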
2024-12-15T04:39:39,921 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=97 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=OPEN, openSeqNum=5, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:39,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=97 2024-12-15T04:39:39,923 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=97, state=SUCCESS; OpenRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 in 179 msec 2024-12-15T04:39:39,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=96 2024-12-15T04:39:39,925 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=96, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, REOPEN/MOVE in 536 msec 2024-12-15T04:39:39,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-15T04:39:39,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 542 msec 2024-12-15T04:39:39,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 963 msec 2024-12-15T04:39:39,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=95 2024-12-15T04:39:39,930 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cd96549 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c54a0d3 2024-12-15T04:39:40,012 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c336ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,014 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31aea41b to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3875c8c5 2024-12-15T04:39:40,039 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f94d721, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,041 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-12-15T04:39:40,055 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,056 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2205f666 to 
127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27539bdc 2024-12-15T04:39:40,063 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c907e21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,064 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584e9ce to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e3203d9 2024-12-15T04:39:40,071 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ec0f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,072 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x798e7fd4 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7819b9e2 2024-12-15T04:39:40,079 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b308f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,080 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7284f16d to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47679076 2024-12-15T04:39:40,088 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68035c67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,089 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37a637ac to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4cb9e50e 2024-12-15T04:39:40,096 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3eab689a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,098 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fa53591 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3cb726fe 2024-12-15T04:39:40,105 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@59bd764a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,106 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x3512017b to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@301741f1 2024-12-15T04:39:40,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a6e9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:39:40,117 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees 2024-12-15T04:39:40,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-15T04:39:40,119 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:40,119 DEBUG [hconnection-0x74ab7020-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,119 DEBUG [hconnection-0x17a5f512-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,119 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=100, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:40,119 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=100, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:40,120 DEBUG [hconnection-0x219c2515-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,120 DEBUG [hconnection-0x197fc57e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,120 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49986, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,120 DEBUG [hconnection-0x9a0410e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,121 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,121 DEBUG [hconnection-0x205f71c1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,121 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50000, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-15T04:39:40,121 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,121 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,122 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,122 DEBUG [hconnection-0xc05cf53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,123 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,123 DEBUG [hconnection-0xcf0b5ce-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,123 DEBUG [hconnection-0x7e32aa73-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,124 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50060, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,124 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50058, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:40,127 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:39:40,128 DEBUG [hconnection-0x30f324ff-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:39:40,129 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:39:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:40,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:40,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237640140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237640140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237640141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,146 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237640141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237640146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412159802a0f7bc514a3b8f2d198da8c80b2f_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237580126/Put/seqid=0 2024-12-15T04:39:40,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742171_1347 (size=12154) 2024-12-15T04:39:40,167 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:40,172 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412159802a0f7bc514a3b8f2d198da8c80b2f_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159802a0f7bc514a3b8f2d198da8c80b2f_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:40,173 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/83a74427aa674e86820702b65b739371, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:40,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/83a74427aa674e86820702b65b739371 is 175, key is test_row_0/A:col10/1734237580126/Put/seqid=0 2024-12-15T04:39:40,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742172_1348 (size=30955) 2024-12-15T04:39:40,183 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/83a74427aa674e86820702b65b739371 2024-12-15T04:39:40,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/6766e637d05348ea87a69a632f3a2005 is 50, key is test_row_0/B:col10/1734237580126/Put/seqid=0 2024-12-15T04:39:40,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742173_1349 (size=12001) 2024-12-15T04:39:40,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/6766e637d05348ea87a69a632f3a2005 2024-12-15T04:39:40,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-15T04:39:40,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237640242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,243 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237640242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/d288711b72144438a8cdc56ef6d95453 is 50, key is test_row_0/C:col10/1734237580126/Put/seqid=0 2024-12-15T04:39:40,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237640248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237640248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,250 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237640248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742174_1350 (size=12001) 2024-12-15T04:39:40,276 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,276 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-15T04:39:40,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:40,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:40,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:40,276 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:40,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:40,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:40,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-15T04:39:40,428 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-15T04:39:40,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:40,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:40,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:40,428 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
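The repeated RegionTooBusyException above comes from HRegion.checkResources(): once a region's memstore exceeds the blocking size (the configured flush size times the block multiplier), new mutations are rejected until the in-flight flush drains it, which is also why the FlushRegionProcedure keeps reporting "already flushing". A hedged sketch of the two knobs involved, using their stock defaults; the tiny 512 K limit in this log implies the test lowered them far below these values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingMemStoreSize {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Default flush size is 128 MB and the default block multiplier is 4, so a
    // region normally blocks writes at 512 MB of memstore; the 512 K limit in
    // the RegionTooBusyException messages means this test shrank these values.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
  }
}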
2024-12-15T04:39:40,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:40,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:40,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237640445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,448 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237640445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237640450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,453 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237640451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,455 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237640451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,580 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-15T04:39:40,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:40,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:40,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:40,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] handler.RSProcedureHandler(58): pid=101 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
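[Editorial sketch, not part of the captured log.] The repeated "Over memstore limit=512.0 K" warnings mean writes to the region are being rejected because its memstore has exceeded the blocking threshold (flush size times the block multiplier, evidently configured very small for this test) while the flush is still in progress; each rejected Mutate shows up as a RegionTooBusyException in the CallRunner lines. The stock HBase client normally retries these internally (governed by hbase.client.retries.number and the client pause), so an explicit loop like the one below is purely illustrative; table name, column family, and row are assumptions:

// Hedged sketch: a writer that backs off and retries when a put is rejected
// because the region is over its blocking memstore limit, as in the warnings above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long pauseMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (IOException e) {
          // Typically a RegionTooBusyException (possibly wrapped once client-side
          // retries are exhausted): wait for the flush to catch up, then retry.
          Thread.sleep(pauseMs);
          pauseMs = Math.min(pauseMs * 2, 10_000);
        }
      }
    }
  }
}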
2024-12-15T04:39:40,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=101 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:40,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=101 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:40,658 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/d288711b72144438a8cdc56ef6d95453 2024-12-15T04:39:40,661 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/83a74427aa674e86820702b65b739371 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/83a74427aa674e86820702b65b739371 2024-12-15T04:39:40,664 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/83a74427aa674e86820702b65b739371, entries=150, sequenceid=18, filesize=30.2 K 2024-12-15T04:39:40,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/6766e637d05348ea87a69a632f3a2005 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6766e637d05348ea87a69a632f3a2005 2024-12-15T04:39:40,667 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6766e637d05348ea87a69a632f3a2005, entries=150, sequenceid=18, filesize=11.7 K 2024-12-15T04:39:40,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/d288711b72144438a8cdc56ef6d95453 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d288711b72144438a8cdc56ef6d95453 2024-12-15T04:39:40,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d288711b72144438a8cdc56ef6d95453, entries=150, sequenceid=18, filesize=11.7 K 2024-12-15T04:39:40,671 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for bfdbd4565c936c59a93e348f03cec823 in 544ms, sequenceid=18, compaction requested=false 2024-12-15T04:39:40,671 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:40,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=100 2024-12-15T04:39:40,733 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,733 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=101 2024-12-15T04:39:40,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:40,733 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:39:40,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:40,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:40,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:40,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:40,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:40,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:40,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121542cefa6c0e1049a6b2db201b5ccb6098_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237580139/Put/seqid=0 2024-12-15T04:39:40,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742175_1351 (size=12154) 2024-12-15T04:39:40,752 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:40,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:40,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237640758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237640758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237640759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,762 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237640759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237640760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237640862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237640862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237640862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237640863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:40,866 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:40,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237640864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237641065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237641066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237641066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237641068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237641068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:41,145 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121542cefa6c0e1049a6b2db201b5ccb6098_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121542cefa6c0e1049a6b2db201b5ccb6098_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:41,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/9d45a4e3d19d40b1948e0a657cc7a27d, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:41,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/9d45a4e3d19d40b1948e0a657cc7a27d is 175, key is test_row_0/A:col10/1734237580139/Put/seqid=0 2024-12-15T04:39:41,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742176_1352 (size=30955) 2024-12-15T04:39:41,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-15T04:39:41,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237641369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237641369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237641370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237641374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237641375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,550 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/9d45a4e3d19d40b1948e0a657cc7a27d 2024-12-15T04:39:41,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/c486a5e6ca144e26b2538df152438de2 is 50, key is test_row_0/B:col10/1734237580139/Put/seqid=0 2024-12-15T04:39:41,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742177_1353 (size=12001) 2024-12-15T04:39:41,589 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-15T04:39:41,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237641873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237641875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237641876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237641876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:41,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237641881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:41,961 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/c486a5e6ca144e26b2538df152438de2 2024-12-15T04:39:41,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/ca94f680afce45be8e2a95665f71c55e is 50, key is test_row_0/C:col10/1734237580139/Put/seqid=0 2024-12-15T04:39:41,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742178_1354 (size=12001) 2024-12-15T04:39:41,977 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/ca94f680afce45be8e2a95665f71c55e 2024-12-15T04:39:41,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/9d45a4e3d19d40b1948e0a657cc7a27d as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/9d45a4e3d19d40b1948e0a657cc7a27d 2024-12-15T04:39:41,984 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/9d45a4e3d19d40b1948e0a657cc7a27d, entries=150, sequenceid=41, filesize=30.2 K 2024-12-15T04:39:41,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/c486a5e6ca144e26b2538df152438de2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/c486a5e6ca144e26b2538df152438de2 2024-12-15T04:39:41,988 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/c486a5e6ca144e26b2538df152438de2, entries=150, sequenceid=41, filesize=11.7 K 2024-12-15T04:39:41,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/ca94f680afce45be8e2a95665f71c55e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ca94f680afce45be8e2a95665f71c55e 2024-12-15T04:39:41,993 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ca94f680afce45be8e2a95665f71c55e, entries=150, sequenceid=41, filesize=11.7 K 2024-12-15T04:39:41,994 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for bfdbd4565c936c59a93e348f03cec823 in 1261ms, sequenceid=41, compaction requested=false 2024-12-15T04:39:41,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:41,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:41,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=101}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=101 2024-12-15T04:39:41,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=101 2024-12-15T04:39:41,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=100 2024-12-15T04:39:41,996 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8760 sec 2024-12-15T04:39:41,997 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=100, table=TestAcidGuarantees in 1.8790 sec 2024-12-15T04:39:42,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=100 2024-12-15T04:39:42,222 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 100 completed 2024-12-15T04:39:42,223 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-12-15T04:39:42,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-15T04:39:42,224 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:42,225 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:42,225 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:42,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-15T04:39:42,376 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:42,376 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-12-15T04:39:42,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:42,376 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-15T04:39:42,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:42,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:42,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:42,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:42,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:42,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:42,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412155654908c94f7463aac1260a7a260b5a3_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237580758/Put/seqid=0 2024-12-15T04:39:42,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742179_1355 (size=12154) 2024-12-15T04:39:42,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-15T04:39:42,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:42,788 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412155654908c94f7463aac1260a7a260b5a3_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155654908c94f7463aac1260a7a260b5a3_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/728e51d01762409bb33a8d1b60838c59, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:42,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/728e51d01762409bb33a8d1b60838c59 is 175, key is test_row_0/A:col10/1734237580758/Put/seqid=0 2024-12-15T04:39:42,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742180_1356 (size=30955) 2024-12-15T04:39:42,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-15T04:39:42,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:42,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:42,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:42,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237642896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:42,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237642899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237642900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237642900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:42,904 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:42,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237642901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237643001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237643004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237643004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237643005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237643005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,193 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/728e51d01762409bb33a8d1b60838c59 2024-12-15T04:39:43,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/fa54b6069f344f91bd8c6074cf0e61d9 is 50, key is test_row_0/B:col10/1734237580758/Put/seqid=0 2024-12-15T04:39:43,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742181_1357 (size=12001) 2024-12-15T04:39:43,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237643205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237643206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237643206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237643207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237643207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-15T04:39:43,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237643509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237643510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237643511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237643511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,513 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:43,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237643512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:43,601 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/fa54b6069f344f91bd8c6074cf0e61d9 2024-12-15T04:39:43,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/150481eae44d4a32bf7432603d09e16d is 50, key is test_row_0/C:col10/1734237580758/Put/seqid=0 2024-12-15T04:39:43,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742182_1358 (size=12001) 2024-12-15T04:39:44,010 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/150481eae44d4a32bf7432603d09e16d 2024-12-15T04:39:44,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/728e51d01762409bb33a8d1b60838c59 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/728e51d01762409bb33a8d1b60838c59 2024-12-15T04:39:44,017 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/728e51d01762409bb33a8d1b60838c59, entries=150, sequenceid=54, filesize=30.2 K 2024-12-15T04:39:44,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/fa54b6069f344f91bd8c6074cf0e61d9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fa54b6069f344f91bd8c6074cf0e61d9 2024-12-15T04:39:44,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:44,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237644015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:44,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:44,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237644015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:44,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237644017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:44,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237644018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:44,020 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:44,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237644018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:44,021 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fa54b6069f344f91bd8c6074cf0e61d9, entries=150, sequenceid=54, filesize=11.7 K 2024-12-15T04:39:44,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/150481eae44d4a32bf7432603d09e16d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/150481eae44d4a32bf7432603d09e16d 2024-12-15T04:39:44,031 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/150481eae44d4a32bf7432603d09e16d, entries=150, sequenceid=54, filesize=11.7 K 2024-12-15T04:39:44,032 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for bfdbd4565c936c59a93e348f03cec823 in 1655ms, sequenceid=54, compaction requested=true 2024-12-15T04:39:44,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:44,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:44,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-12-15T04:39:44,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-12-15T04:39:44,033 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-12-15T04:39:44,034 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8080 sec 2024-12-15T04:39:44,034 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 1.8110 sec 2024-12-15T04:39:44,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-12-15T04:39:44,328 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-12-15T04:39:44,328 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:44,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-12-15T04:39:44,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T04:39:44,330 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:44,330 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:44,330 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:44,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T04:39:44,481 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:44,482 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-12-15T04:39:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:44,482 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:39:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:44,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:44,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121513a02600cbc84d8f9b38042bb007f24b_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237582895/Put/seqid=0 2024-12-15T04:39:44,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742183_1359 (size=12154) 2024-12-15T04:39:44,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T04:39:44,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:44,895 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121513a02600cbc84d8f9b38042bb007f24b_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121513a02600cbc84d8f9b38042bb007f24b_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:44,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/04ddfcb7333a4d848a300059cbfbe18c, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:44,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/04ddfcb7333a4d848a300059cbfbe18c is 175, key is test_row_0/A:col10/1734237582895/Put/seqid=0 2024-12-15T04:39:44,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742184_1360 (size=30955) 2024-12-15T04:39:44,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T04:39:45,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:45,024 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:45,033 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237645030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237645030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,038 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237645031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237645032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237645033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237645139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237645139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237645142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237645142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,300 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/04ddfcb7333a4d848a300059cbfbe18c 2024-12-15T04:39:45,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/d9e7a86df6224a179fccfa04ab0f7d46 is 50, key is test_row_0/B:col10/1734237582895/Put/seqid=0 2024-12-15T04:39:45,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742185_1361 (size=12001) 2024-12-15T04:39:45,313 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/d9e7a86df6224a179fccfa04ab0f7d46 2024-12-15T04:39:45,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/316e7ef6343a4952adf02220b49bfdda is 50, key is test_row_0/C:col10/1734237582895/Put/seqid=0 2024-12-15T04:39:45,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742186_1362 (size=12001) 2024-12-15T04:39:45,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237645343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237645343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237645347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,349 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237645347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T04:39:45,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237645646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237645650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,653 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237645650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:45,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237645651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:45,730 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/316e7ef6343a4952adf02220b49bfdda 2024-12-15T04:39:45,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/04ddfcb7333a4d848a300059cbfbe18c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/04ddfcb7333a4d848a300059cbfbe18c 2024-12-15T04:39:45,735 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/04ddfcb7333a4d848a300059cbfbe18c, entries=150, sequenceid=78, filesize=30.2 K 2024-12-15T04:39:45,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/d9e7a86df6224a179fccfa04ab0f7d46 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d9e7a86df6224a179fccfa04ab0f7d46 2024-12-15T04:39:45,739 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d9e7a86df6224a179fccfa04ab0f7d46, entries=150, sequenceid=78, filesize=11.7 K 2024-12-15T04:39:45,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/316e7ef6343a4952adf02220b49bfdda as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/316e7ef6343a4952adf02220b49bfdda 2024-12-15T04:39:45,742 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/316e7ef6343a4952adf02220b49bfdda, entries=150, sequenceid=78, filesize=11.7 K 2024-12-15T04:39:45,743 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for bfdbd4565c936c59a93e348f03cec823 in 1261ms, sequenceid=78, compaction requested=true 2024-12-15T04:39:45,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:45,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:45,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-15T04:39:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-15T04:39:45,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-15T04:39:45,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4140 sec 2024-12-15T04:39:45,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.4170 sec 2024-12-15T04:39:46,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:46,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-15T04:39:46,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:46,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:46,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:46,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:46,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:46,154 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:46,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e2c21c93da5e4644b3b948508b6ef162_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237585031/Put/seqid=0 2024-12-15T04:39:46,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742187_1363 (size=14594) 2024-12-15T04:39:46,166 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:46,169 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e2c21c93da5e4644b3b948508b6ef162_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e2c21c93da5e4644b3b948508b6ef162_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:46,169 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/47ef864e674a43da80a6af4b16d531ad, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:46,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/47ef864e674a43da80a6af4b16d531ad is 175, key is test_row_0/A:col10/1734237585031/Put/seqid=0 2024-12-15T04:39:46,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742188_1364 (size=39549) 2024-12-15T04:39:46,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237646197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237646203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,209 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237646204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237646206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,309 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237646306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237646309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237646309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,314 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237646311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-15T04:39:46,433 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-12-15T04:39:46,434 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:46,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-12-15T04:39:46,435 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:46,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-15T04:39:46,435 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:46,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:46,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237646511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237646512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,519 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237646514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237646516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-15T04:39:46,577 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=91, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/47ef864e674a43da80a6af4b16d531ad 2024-12-15T04:39:46,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/dfaf235bb01b4d5a9d1d8996e407f202 is 50, key is test_row_0/B:col10/1734237585031/Put/seqid=0 2024-12-15T04:39:46,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742189_1365 (size=12001) 2024-12-15T04:39:46,586 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-15T04:39:46,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:46,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:46,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:46,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-15T04:39:46,738 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-15T04:39:46,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:46,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:46,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:46,739 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237646820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237646821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237646821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:46,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237646823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,890 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:46,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-15T04:39:46,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:46,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:46,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:46,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
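[Editor's note] The RegionTooBusyException entries above report writes being rejected once the region's memstore passes the 512.0 K blocking limit, while the master's FlushRegionCallable (pid=107) keeps failing because a flush is already in flight. The blocking limit is typically the memstore flush size multiplied by the block multiplier; the sketch below only illustrates that relationship, and the two values are assumptions chosen to reproduce the 512 K figure, not settings read from this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed values: a 128 K flush size with the default multiplier of 4
            // yields the 512 K "Over memstore limit" threshold seen in the log.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Writes to a region block once its memstore exceeds "
                + (flushSize * multiplier) + " bytes");
        }
    }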
2024-12-15T04:39:46,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:46,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/dfaf235bb01b4d5a9d1d8996e407f202 2024-12-15T04:39:46,991 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/5c642021f6a04d729ea2768472159143 is 50, key is test_row_0/C:col10/1734237585031/Put/seqid=0 2024-12-15T04:39:47,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742190_1366 (size=12001) 2024-12-15T04:39:47,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-15T04:39:47,039 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:47,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237647039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,040 DEBUG [Thread-1574 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:47,042 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,043 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-15T04:39:47,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
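[Editor's note] The RpcRetryingCallerImpl entry above is the client side of the same stall: the put against 'test_row_0' is retried with backoff (tries=6 out of retries=16) instead of failing immediately, and the RemoteWithExtrasException is unwrapped back into a RegionTooBusyException on each attempt. Below is a minimal client-side sketch of such a writer against the same table, row, and family named in the log; the retry settings are assumptions meant to echo the logged retry budget, not the test harness's actual values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriteSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Assumed retry knobs; the logged "retries=16" suggests a similar budget.
            conf.setInt("hbase.client.retries.number", 16);  // retries per operation
            conf.setLong("hbase.client.pause", 100L);        // base backoff in ms

            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                // put() blocks while the retrying caller backs off on RegionTooBusyException;
                // it only surfaces the exception once the retry budget is exhausted.
                table.put(put);
            }
        }
    }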
2024-12-15T04:39:47,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:47,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:47,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:47,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:47,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:47,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,195 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-15T04:39:47,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:47,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
as already flushing 2024-12-15T04:39:47,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:47,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:47,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:47,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:47,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:47,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237647323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:47,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237647323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:47,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237647323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:47,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237647328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,347 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,347 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-15T04:39:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:47,347 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
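[Editor's note] The RSProcedureDispatcher keeps re-sending the same flush procedure (pid=107) to the regionserver, which answers "NOT flushing ... as already flushing" until the in-flight flush drains. An explicit flush requested through the Admin API drives the same server-side path; the sketch below is illustrative only and assumes a reachable cluster with this table deployed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // A table flush is executed per region via FlushRegionCallable on the
                // regionserver; if that region is already flushing, the callable fails and
                // the master's procedure retries, as seen repeatedly in the log above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }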
2024-12-15T04:39:47,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:47,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:47,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/5c642021f6a04d729ea2768472159143 2024-12-15T04:39:47,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/47ef864e674a43da80a6af4b16d531ad as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/47ef864e674a43da80a6af4b16d531ad 2024-12-15T04:39:47,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/47ef864e674a43da80a6af4b16d531ad, entries=200, sequenceid=91, filesize=38.6 K 2024-12-15T04:39:47,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/dfaf235bb01b4d5a9d1d8996e407f202 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/dfaf235bb01b4d5a9d1d8996e407f202 2024-12-15T04:39:47,412 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/dfaf235bb01b4d5a9d1d8996e407f202, entries=150, sequenceid=91, filesize=11.7 K 2024-12-15T04:39:47,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/5c642021f6a04d729ea2768472159143 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5c642021f6a04d729ea2768472159143 2024-12-15T04:39:47,415 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5c642021f6a04d729ea2768472159143, entries=150, sequenceid=91, filesize=11.7 K 2024-12-15T04:39:47,416 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bfdbd4565c936c59a93e348f03cec823 in 1262ms, sequenceid=91, compaction requested=true 2024-12-15T04:39:47,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:47,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:A, 
priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:47,416 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:47,416 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-15T04:39:47,416 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-15T04:39:47,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:47,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:47,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:47,417 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:47,417 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 163369 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-15T04:39:47,417 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/A is initiating minor compaction (all files) 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/B is initiating minor compaction (all files) 2024-12-15T04:39:47,418 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/B in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:47,418 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/A in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
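[Editor's note] Once the flush lands, each store (A, B, C) holds five files and the exploring compaction policy selects all five ("selected 5 files of size 60005 / 163369"). The snippet below is a simplified, self-contained sketch of the size-ratio test that such a selection has to pass (every file no larger than the sum of the others times the ratio); it is not the HBase class itself, and the file sizes and 1.2 ratio are assumptions based on the ~12 K B-store files and the usual default for hbase.hstore.compaction.ratio.

    import java.util.List;

    public class CompactionRatioSketch {
        // Simplified ratio check over a candidate selection of store file sizes.
        static boolean withinRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                // A single file much larger than the rest breaks the ratio and is excluded.
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the B-store selection above: five ~12 K files totalling ~60 K.
            List<Long> bStoreFiles = List.of(12001L, 12001L, 12001L, 12001L, 12001L);
            System.out.println("B-store candidate passes ratio check: "
                + withinRatio(bStoreFiles, 1.2));
        }
    }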
2024-12-15T04:39:47,418 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6766e637d05348ea87a69a632f3a2005, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/c486a5e6ca144e26b2538df152438de2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fa54b6069f344f91bd8c6074cf0e61d9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d9e7a86df6224a179fccfa04ab0f7d46, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/dfaf235bb01b4d5a9d1d8996e407f202] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=58.6 K 2024-12-15T04:39:47,418 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/83a74427aa674e86820702b65b739371, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/9d45a4e3d19d40b1948e0a657cc7a27d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/728e51d01762409bb33a8d1b60838c59, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/04ddfcb7333a4d848a300059cbfbe18c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/47ef864e674a43da80a6af4b16d531ad] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=159.5 K 2024-12-15T04:39:47,418 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/83a74427aa674e86820702b65b739371, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/9d45a4e3d19d40b1948e0a657cc7a27d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/728e51d01762409bb33a8d1b60838c59, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/04ddfcb7333a4d848a300059cbfbe18c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/47ef864e674a43da80a6af4b16d531ad] 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6766e637d05348ea87a69a632f3a2005, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734237580126 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83a74427aa674e86820702b65b739371, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734237580126 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c486a5e6ca144e26b2538df152438de2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734237580136 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9d45a4e3d19d40b1948e0a657cc7a27d, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734237580136 2024-12-15T04:39:47,418 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fa54b6069f344f91bd8c6074cf0e61d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237580758 2024-12-15T04:39:47,419 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 728e51d01762409bb33a8d1b60838c59, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237580758 2024-12-15T04:39:47,419 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d9e7a86df6224a179fccfa04ab0f7d46, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734237582895 2024-12-15T04:39:47,419 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04ddfcb7333a4d848a300059cbfbe18c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734237582895 2024-12-15T04:39:47,419 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting dfaf235bb01b4d5a9d1d8996e407f202, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734237585031 2024-12-15T04:39:47,419 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47ef864e674a43da80a6af4b16d531ad, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=91, 
earliestPutTs=1734237585029 2024-12-15T04:39:47,424 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:47,426 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#B#compaction#307 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:47,427 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412152cfd1d4de951404eb5c1f227cbab5755_bfdbd4565c936c59a93e348f03cec823 store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:47,427 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/978b810691d44806a05b0fd777ee6426 is 50, key is test_row_0/B:col10/1734237585031/Put/seqid=0 2024-12-15T04:39:47,428 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412152cfd1d4de951404eb5c1f227cbab5755_bfdbd4565c936c59a93e348f03cec823, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:47,429 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412152cfd1d4de951404eb5c1f227cbab5755_bfdbd4565c936c59a93e348f03cec823 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:47,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742191_1367 (size=12173) 2024-12-15T04:39:47,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742192_1368 (size=4469) 2024-12-15T04:39:47,432 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#A#compaction#306 average throughput is 3.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:47,433 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/a73dd3c3f97f457b9360d88995db9f21 is 175, key is test_row_0/A:col10/1734237585031/Put/seqid=0 2024-12-15T04:39:47,436 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/978b810691d44806a05b0fd777ee6426 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/978b810691d44806a05b0fd777ee6426 2024-12-15T04:39:47,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742193_1369 (size=31127) 2024-12-15T04:39:47,441 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/B of bfdbd4565c936c59a93e348f03cec823 into 978b810691d44806a05b0fd777ee6426(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:47,441 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:47,441 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/B, priority=11, startTime=1734237587416; duration=0sec 2024-12-15T04:39:47,442 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:47,442 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:B 2024-12-15T04:39:47,442 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-12-15T04:39:47,443 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-12-15T04:39:47,443 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/C is initiating minor compaction (all files) 2024-12-15T04:39:47,443 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/C in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:47,443 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d288711b72144438a8cdc56ef6d95453, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ca94f680afce45be8e2a95665f71c55e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/150481eae44d4a32bf7432603d09e16d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/316e7ef6343a4952adf02220b49bfdda, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5c642021f6a04d729ea2768472159143] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=58.6 K 2024-12-15T04:39:47,444 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d288711b72144438a8cdc56ef6d95453, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1734237580126 2024-12-15T04:39:47,444 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ca94f680afce45be8e2a95665f71c55e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1734237580136 2024-12-15T04:39:47,445 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 150481eae44d4a32bf7432603d09e16d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237580758 2024-12-15T04:39:47,445 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/a73dd3c3f97f457b9360d88995db9f21 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a73dd3c3f97f457b9360d88995db9f21 2024-12-15T04:39:47,445 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 316e7ef6343a4952adf02220b49bfdda, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1734237582895 2024-12-15T04:39:47,446 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c642021f6a04d729ea2768472159143, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734237585031 2024-12-15T04:39:47,451 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/A of bfdbd4565c936c59a93e348f03cec823 into a73dd3c3f97f457b9360d88995db9f21(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:47,451 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:47,451 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/A, priority=11, startTime=1734237587416; duration=0sec 2024-12-15T04:39:47,451 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:47,452 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:A 2024-12-15T04:39:47,456 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#C#compaction#308 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:47,456 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/e873e71e9aac4a81bcb5e9d972936603 is 50, key is test_row_0/C:col10/1734237585031/Put/seqid=0 2024-12-15T04:39:47,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742194_1370 (size=12173) 2024-12-15T04:39:47,499 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:47,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-12-15T04:39:47,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:47,500 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:47,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:47,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215249e1099f16a475193995d4f5bfa40a1_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237586204/Put/seqid=0 2024-12-15T04:39:47,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742195_1371 (size=12154) 2024-12-15T04:39:47,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-15T04:39:47,864 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/e873e71e9aac4a81bcb5e9d972936603 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/e873e71e9aac4a81bcb5e9d972936603 2024-12-15T04:39:47,868 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/C of bfdbd4565c936c59a93e348f03cec823 into e873e71e9aac4a81bcb5e9d972936603(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:47,868 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:47,868 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/C, priority=11, startTime=1734237587417; duration=0sec 2024-12-15T04:39:47,868 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:47,868 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:C 2024-12-15T04:39:47,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:47,925 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215249e1099f16a475193995d4f5bfa40a1_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215249e1099f16a475193995d4f5bfa40a1_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:47,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/a69b344ccb174592a99de97997b96778, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:47,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/a69b344ccb174592a99de97997b96778 is 175, key is test_row_0/A:col10/1734237586204/Put/seqid=0 2024-12-15T04:39:47,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742196_1372 (size=30955) 2024-12-15T04:39:48,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:48,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
as already flushing 2024-12-15T04:39:48,330 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=116, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/a69b344ccb174592a99de97997b96778 2024-12-15T04:39:48,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ae78262793c74039945c1f1121f8cf27 is 50, key is test_row_0/B:col10/1734237586204/Put/seqid=0 2024-12-15T04:39:48,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742197_1373 (size=12001) 2024-12-15T04:39:48,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237648340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237648344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237648344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,347 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237648344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237648445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237648446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237648448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-15T04:39:48,649 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237648647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,649 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237648649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237648650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,742 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ae78262793c74039945c1f1121f8cf27 2024-12-15T04:39:48,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/5e8c0708dd87490d98812f2d23721bd8 is 50, key is test_row_0/C:col10/1734237586204/Put/seqid=0 2024-12-15T04:39:48,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742198_1374 (size=12001) 2024-12-15T04:39:48,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237648952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237648952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:48,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:48,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237648956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,152 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/5e8c0708dd87490d98812f2d23721bd8 2024-12-15T04:39:49,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/a69b344ccb174592a99de97997b96778 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a69b344ccb174592a99de97997b96778 2024-12-15T04:39:49,158 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a69b344ccb174592a99de97997b96778, entries=150, sequenceid=116, filesize=30.2 K 2024-12-15T04:39:49,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ae78262793c74039945c1f1121f8cf27 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ae78262793c74039945c1f1121f8cf27 2024-12-15T04:39:49,161 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ae78262793c74039945c1f1121f8cf27, entries=150, sequenceid=116, filesize=11.7 K 2024-12-15T04:39:49,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/5e8c0708dd87490d98812f2d23721bd8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5e8c0708dd87490d98812f2d23721bd8 2024-12-15T04:39:49,164 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5e8c0708dd87490d98812f2d23721bd8, entries=150, sequenceid=116, filesize=11.7 K 2024-12-15T04:39:49,165 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for bfdbd4565c936c59a93e348f03cec823 in 1666ms, sequenceid=116, compaction requested=false 2024-12-15T04:39:49,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:49,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:49,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-12-15T04:39:49,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-12-15T04:39:49,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-12-15T04:39:49,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7310 sec 2024-12-15T04:39:49,168 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.7330 sec 2024-12-15T04:39:49,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:49,461 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-15T04:39:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:49,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:49,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215bcaf620b148d4ae485e2b4c88dd8ad9e_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237588343/Put/seqid=0 2024-12-15T04:39:49,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742199_1375 (size=14694) 2024-12-15T04:39:49,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237649488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237649492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237649492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237649593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237649593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237649593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237649797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237649797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:49,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237649798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:49,873 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:49,876 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215bcaf620b148d4ae485e2b4c88dd8ad9e_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215bcaf620b148d4ae485e2b4c88dd8ad9e_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:49,877 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/eee28abbfc8343eeac4b461067727d01, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:49,877 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/eee28abbfc8343eeac4b461067727d01 is 175, key is test_row_0/A:col10/1734237588343/Put/seqid=0 2024-12-15T04:39:49,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742200_1376 (size=39649) 2024-12-15T04:39:49,881 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/eee28abbfc8343eeac4b461067727d01 2024-12-15T04:39:49,887 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/9e1ef6ced12d47c0962ebc76ca2536ac is 50, key is test_row_0/B:col10/1734237588343/Put/seqid=0 2024-12-15T04:39:49,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742201_1377 
(size=12101) 2024-12-15T04:39:50,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237650103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237650103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:50,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237650103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,290 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/9e1ef6ced12d47c0962ebc76ca2536ac 2024-12-15T04:39:50,295 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/d5c52fc31d5e4eb5a57f4ad72f9f793d is 50, key is test_row_0/C:col10/1734237588343/Put/seqid=0 2024-12-15T04:39:50,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742202_1378 (size=12101) 2024-12-15T04:39:50,352 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:50,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237650349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,353 DEBUG [Thread-1566 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4148 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:50,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-12-15T04:39:50,539 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-12-15T04:39:50,540 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:50,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-12-15T04:39:50,541 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:50,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-15T04:39:50,541 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:50,541 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:50,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237650606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237650607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,611 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237650608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-15T04:39:50,692 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-15T04:39:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:50,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:50,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:50,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:50,699 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/d5c52fc31d5e4eb5a57f4ad72f9f793d 2024-12-15T04:39:50,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/eee28abbfc8343eeac4b461067727d01 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/eee28abbfc8343eeac4b461067727d01 2024-12-15T04:39:50,705 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/eee28abbfc8343eeac4b461067727d01, entries=200, sequenceid=131, filesize=38.7 K 2024-12-15T04:39:50,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/9e1ef6ced12d47c0962ebc76ca2536ac as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/9e1ef6ced12d47c0962ebc76ca2536ac 2024-12-15T04:39:50,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/9e1ef6ced12d47c0962ebc76ca2536ac, entries=150, sequenceid=131, filesize=11.8 K 2024-12-15T04:39:50,709 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/d5c52fc31d5e4eb5a57f4ad72f9f793d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d5c52fc31d5e4eb5a57f4ad72f9f793d 2024-12-15T04:39:50,712 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d5c52fc31d5e4eb5a57f4ad72f9f793d, entries=150, sequenceid=131, filesize=11.8 K 2024-12-15T04:39:50,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for bfdbd4565c936c59a93e348f03cec823 in 1251ms, sequenceid=131, compaction requested=true 2024-12-15T04:39:50,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:50,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
bfdbd4565c936c59a93e348f03cec823:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:50,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:50,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:50,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:50,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:50,713 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:50,713 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:50,713 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:50,713 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36275 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:50,713 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101731 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:50,713 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/B is initiating minor compaction (all files) 2024-12-15T04:39:50,713 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/A is initiating minor compaction (all files) 2024-12-15T04:39:50,713 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/B in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:50,713 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/A in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:50,713 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/978b810691d44806a05b0fd777ee6426, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ae78262793c74039945c1f1121f8cf27, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/9e1ef6ced12d47c0962ebc76ca2536ac] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=35.4 K 2024-12-15T04:39:50,713 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a73dd3c3f97f457b9360d88995db9f21, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a69b344ccb174592a99de97997b96778, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/eee28abbfc8343eeac4b461067727d01] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=99.3 K 2024-12-15T04:39:50,713 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:50,714 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a73dd3c3f97f457b9360d88995db9f21, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a69b344ccb174592a99de97997b96778, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/eee28abbfc8343eeac4b461067727d01] 2024-12-15T04:39:50,714 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 978b810691d44806a05b0fd777ee6426, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734237585031 2024-12-15T04:39:50,714 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting a73dd3c3f97f457b9360d88995db9f21, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734237585031 2024-12-15T04:39:50,714 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ae78262793c74039945c1f1121f8cf27, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734237586179 2024-12-15T04:39:50,714 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting a69b344ccb174592a99de97997b96778, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734237586179 2024-12-15T04:39:50,714 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e1ef6ced12d47c0962ebc76ca2536ac, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237588334 2024-12-15T04:39:50,714 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting eee28abbfc8343eeac4b461067727d01, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237588334 2024-12-15T04:39:50,719 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:50,720 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#B#compaction#315 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:50,720 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/6b78fc08c6474f73b02c0b9623e48932 is 50, key is test_row_0/B:col10/1734237588343/Put/seqid=0 2024-12-15T04:39:50,723 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121562fcdffa0c3348cc9d770d9c2de44884_bfdbd4565c936c59a93e348f03cec823 store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:50,724 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121562fcdffa0c3348cc9d770d9c2de44884_bfdbd4565c936c59a93e348f03cec823, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:50,725 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121562fcdffa0c3348cc9d770d9c2de44884_bfdbd4565c936c59a93e348f03cec823 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:50,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742203_1379 (size=12375) 2024-12-15T04:39:50,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742204_1380 (size=4469) 2024-12-15T04:39:50,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-15T04:39:50,845 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:50,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-12-15T04:39:50,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:50,845 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:39:50,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:50,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:50,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121525b86597306a4c65b13fc912b90c878f_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237589480/Put/seqid=0 2024-12-15T04:39:50,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742205_1381 (size=12304) 2024-12-15T04:39:50,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:50,858 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121525b86597306a4c65b13fc912b90c878f_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121525b86597306a4c65b13fc912b90c878f_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:50,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/b4c67c73958446a28551d92fd7d1fa39, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:50,859 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/b4c67c73958446a28551d92fd7d1fa39 is 175, key is test_row_0/A:col10/1734237589480/Put/seqid=0 2024-12-15T04:39:50,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742206_1382 (size=31105) 2024-12-15T04:39:51,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:51,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:51,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:51,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237651107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:51,141 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#A#compaction#316 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:51,141 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/25fa1f025ee5406a94835b054f8f9c92 is 175, key is test_row_0/A:col10/1734237588343/Put/seqid=0 2024-12-15T04:39:51,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-15T04:39:51,143 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/6b78fc08c6474f73b02c0b9623e48932 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6b78fc08c6474f73b02c0b9623e48932 2024-12-15T04:39:51,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742207_1383 (size=31329) 2024-12-15T04:39:51,147 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/B of bfdbd4565c936c59a93e348f03cec823 into 6b78fc08c6474f73b02c0b9623e48932(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:51,147 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:51,147 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/B, priority=13, startTime=1734237590713; duration=0sec 2024-12-15T04:39:51,147 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:51,147 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:B 2024-12-15T04:39:51,147 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:51,148 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36275 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:51,148 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/C is initiating minor compaction (all files) 2024-12-15T04:39:51,148 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/C in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:51,148 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/e873e71e9aac4a81bcb5e9d972936603, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5e8c0708dd87490d98812f2d23721bd8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d5c52fc31d5e4eb5a57f4ad72f9f793d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=35.4 K 2024-12-15T04:39:51,148 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e873e71e9aac4a81bcb5e9d972936603, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1734237585031 2024-12-15T04:39:51,148 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e8c0708dd87490d98812f2d23721bd8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734237586179 2024-12-15T04:39:51,148 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d5c52fc31d5e4eb5a57f4ad72f9f793d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237588334 2024-12-15T04:39:51,153 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#C#compaction#318 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:51,153 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/582dcdf8f3f14262bb2e219f3eecb738 is 50, key is test_row_0/C:col10/1734237588343/Put/seqid=0 2024-12-15T04:39:51,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742208_1384 (size=12375) 2024-12-15T04:39:51,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237651213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:51,262 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=154, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/b4c67c73958446a28551d92fd7d1fa39 2024-12-15T04:39:51,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ff5899d263694c2d92bf21189189460d is 50, key is test_row_0/B:col10/1734237589480/Put/seqid=0 2024-12-15T04:39:51,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742209_1385 (size=12151) 2024-12-15T04:39:51,422 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237651419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:51,548 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/25fa1f025ee5406a94835b054f8f9c92 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/25fa1f025ee5406a94835b054f8f9c92 2024-12-15T04:39:51,552 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/A of bfdbd4565c936c59a93e348f03cec823 into 25fa1f025ee5406a94835b054f8f9c92(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:51,552 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:51,552 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/A, priority=13, startTime=1734237590712; duration=0sec 2024-12-15T04:39:51,552 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:51,552 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:A 2024-12-15T04:39:51,562 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/582dcdf8f3f14262bb2e219f3eecb738 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/582dcdf8f3f14262bb2e219f3eecb738 2024-12-15T04:39:51,566 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/C of bfdbd4565c936c59a93e348f03cec823 into 582dcdf8f3f14262bb2e219f3eecb738(size=12.1 K), total size for store is 12.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:51,566 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:51,566 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/C, priority=13, startTime=1734237590713; duration=0sec 2024-12-15T04:39:51,566 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:51,566 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:C 2024-12-15T04:39:51,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237651614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:51,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:51,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237651615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:51,620 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:51,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237651616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:51,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-15T04:39:51,672 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ff5899d263694c2d92bf21189189460d 2024-12-15T04:39:51,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/283bcbb10d0b4012a98518bcec46b682 is 50, key is test_row_0/C:col10/1734237589480/Put/seqid=0 2024-12-15T04:39:51,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742210_1386 (size=12151) 2024-12-15T04:39:51,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:51,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237651725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:52,091 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=154 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/283bcbb10d0b4012a98518bcec46b682 2024-12-15T04:39:52,095 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/b4c67c73958446a28551d92fd7d1fa39 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/b4c67c73958446a28551d92fd7d1fa39 2024-12-15T04:39:52,097 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/b4c67c73958446a28551d92fd7d1fa39, entries=150, sequenceid=154, filesize=30.4 K 2024-12-15T04:39:52,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ff5899d263694c2d92bf21189189460d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ff5899d263694c2d92bf21189189460d 2024-12-15T04:39:52,101 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ff5899d263694c2d92bf21189189460d, entries=150, sequenceid=154, filesize=11.9 K 2024-12-15T04:39:52,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/283bcbb10d0b4012a98518bcec46b682 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/283bcbb10d0b4012a98518bcec46b682 2024-12-15T04:39:52,105 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/283bcbb10d0b4012a98518bcec46b682, entries=150, sequenceid=154, filesize=11.9 K 2024-12-15T04:39:52,105 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for bfdbd4565c936c59a93e348f03cec823 in 1260ms, sequenceid=154, compaction requested=false 2024-12-15T04:39:52,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:52,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:52,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-15T04:39:52,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-15T04:39:52,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-12-15T04:39:52,108 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5660 sec 2024-12-15T04:39:52,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.5680 sec 2024-12-15T04:39:52,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:52,234 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-15T04:39:52,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:52,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:52,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:52,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:52,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:52,234 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:52,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121551d90326e85a4b1082f39436b741560b_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237591100/Put/seqid=0 2024-12-15T04:39:52,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742211_1387 (size=14794) 2024-12-15T04:39:52,248 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:52,251 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121551d90326e85a4b1082f39436b741560b_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121551d90326e85a4b1082f39436b741560b_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:52,252 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1ada5a77ee244604a548d664932f9874, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:52,252 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1ada5a77ee244604a548d664932f9874 is 175, key is test_row_0/A:col10/1734237591100/Put/seqid=0 2024-12-15T04:39:52,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742212_1388 (size=39749) 2024-12-15T04:39:52,255 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1ada5a77ee244604a548d664932f9874 2024-12-15T04:39:52,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/a199df69913c49e7b725f4b7551ffcfc is 50, key is test_row_0/B:col10/1734237591100/Put/seqid=0 2024-12-15T04:39:52,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742213_1389 (size=12151) 2024-12-15T04:39:52,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237652324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:52,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:52,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237652428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:52,632 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:52,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237652631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-12-15T04:39:52,644 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-12-15T04:39:52,645 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-12-15T04:39:52,646 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:52,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T04:39:52,646 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:52,646 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:52,665 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/a199df69913c49e7b725f4b7551ffcfc 2024-12-15T04:39:52,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/2bd17b9ff0734702be05ba6d19003cb5 is 50, key is test_row_0/C:col10/1734237591100/Put/seqid=0 2024-12-15T04:39:52,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742214_1390 (size=12151) 
2024-12-15T04:39:52,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/2bd17b9ff0734702be05ba6d19003cb5 2024-12-15T04:39:52,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1ada5a77ee244604a548d664932f9874 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ada5a77ee244604a548d664932f9874 2024-12-15T04:39:52,679 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ada5a77ee244604a548d664932f9874, entries=200, sequenceid=171, filesize=38.8 K 2024-12-15T04:39:52,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/a199df69913c49e7b725f4b7551ffcfc as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a199df69913c49e7b725f4b7551ffcfc 2024-12-15T04:39:52,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a199df69913c49e7b725f4b7551ffcfc, entries=150, sequenceid=171, filesize=11.9 K 2024-12-15T04:39:52,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/2bd17b9ff0734702be05ba6d19003cb5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/2bd17b9ff0734702be05ba6d19003cb5 2024-12-15T04:39:52,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/2bd17b9ff0734702be05ba6d19003cb5, entries=150, sequenceid=171, filesize=11.9 K 2024-12-15T04:39:52,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for bfdbd4565c936c59a93e348f03cec823 in 453ms, sequenceid=171, compaction requested=true 2024-12-15T04:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:52,686 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:52,686 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:52,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:52,687 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:52,687 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:52,687 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/B is initiating minor compaction (all files) 2024-12-15T04:39:52,687 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/A is initiating minor compaction (all files) 2024-12-15T04:39:52,687 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/B in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:52,687 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/A in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:52,687 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6b78fc08c6474f73b02c0b9623e48932, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ff5899d263694c2d92bf21189189460d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a199df69913c49e7b725f4b7551ffcfc] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=35.8 K 2024-12-15T04:39:52,687 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/25fa1f025ee5406a94835b054f8f9c92, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/b4c67c73958446a28551d92fd7d1fa39, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ada5a77ee244604a548d664932f9874] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=99.8 K 2024-12-15T04:39:52,687 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:52,687 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/25fa1f025ee5406a94835b054f8f9c92, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/b4c67c73958446a28551d92fd7d1fa39, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ada5a77ee244604a548d664932f9874] 2024-12-15T04:39:52,688 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b78fc08c6474f73b02c0b9623e48932, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237588334 2024-12-15T04:39:52,688 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25fa1f025ee5406a94835b054f8f9c92, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237588334 2024-12-15T04:39:52,688 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ff5899d263694c2d92bf21189189460d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734237589480 2024-12-15T04:39:52,688 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4c67c73958446a28551d92fd7d1fa39, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734237589480 2024-12-15T04:39:52,688 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ada5a77ee244604a548d664932f9874, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237591096 2024-12-15T04:39:52,688 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a199df69913c49e7b725f4b7551ffcfc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237591096 2024-12-15T04:39:52,692 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:52,694 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241215323e48e049bb4bcb81e3278787da8982_bfdbd4565c936c59a93e348f03cec823 store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:52,695 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#B#compaction#325 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:52,695 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241215323e48e049bb4bcb81e3278787da8982_bfdbd4565c936c59a93e348f03cec823, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:52,696 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215323e48e049bb4bcb81e3278787da8982_bfdbd4565c936c59a93e348f03cec823 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:52,696 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/f6f8b6a55f3d499ab25814c332d82cac is 50, key is test_row_0/B:col10/1734237591100/Put/seqid=0 2024-12-15T04:39:52,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742215_1391 (size=4469) 2024-12-15T04:39:52,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742216_1392 (size=12527) 2024-12-15T04:39:52,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T04:39:52,805 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:52,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-12-15T04:39:52,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:52,806 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-15T04:39:52,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:52,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:52,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:52,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:52,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:52,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:52,811 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412157ff6a5634970461f996e756ab125d7dc_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237592306/Put/seqid=0 2024-12-15T04:39:52,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742217_1393 (size=12304) 2024-12-15T04:39:52,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:52,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:52,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T04:39:52,989 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:52,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237652983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:53,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:53,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237653090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:53,109 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#A#compaction#324 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:53,110 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/009a982a253c496ba87a9e2bf9207c79 is 175, key is test_row_0/A:col10/1734237591100/Put/seqid=0 2024-12-15T04:39:53,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742218_1394 (size=31481) 2024-12-15T04:39:53,116 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/f6f8b6a55f3d499ab25814c332d82cac as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/f6f8b6a55f3d499ab25814c332d82cac 2024-12-15T04:39:53,119 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/B of bfdbd4565c936c59a93e348f03cec823 into f6f8b6a55f3d499ab25814c332d82cac(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:53,119 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:53,119 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/B, priority=13, startTime=1734237592686; duration=0sec 2024-12-15T04:39:53,119 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:53,119 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:B 2024-12-15T04:39:53,119 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:53,120 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:53,120 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/C is initiating minor compaction (all files) 2024-12-15T04:39:53,120 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/C in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:53,120 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/582dcdf8f3f14262bb2e219f3eecb738, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/283bcbb10d0b4012a98518bcec46b682, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/2bd17b9ff0734702be05ba6d19003cb5] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=35.8 K 2024-12-15T04:39:53,120 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 582dcdf8f3f14262bb2e219f3eecb738, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237588334 2024-12-15T04:39:53,121 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 283bcbb10d0b4012a98518bcec46b682, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=154, earliestPutTs=1734237589480 2024-12-15T04:39:53,121 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bd17b9ff0734702be05ba6d19003cb5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237591096 2024-12-15T04:39:53,126 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#C#compaction#327 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:53,127 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/fd183f7357ad4941a43f20be330ee6e7 is 50, key is test_row_0/C:col10/1734237591100/Put/seqid=0 2024-12-15T04:39:53,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742219_1395 (size=12527) 2024-12-15T04:39:53,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:53,218 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412157ff6a5634970461f996e756ab125d7dc_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412157ff6a5634970461f996e756ab125d7dc_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:53,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/cec0657ecb7746788308aae37b2f4854, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:53,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/cec0657ecb7746788308aae37b2f4854 is 175, key is test_row_0/A:col10/1734237592306/Put/seqid=0 2024-12-15T04:39:53,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742220_1396 (size=31105) 2024-12-15T04:39:53,225 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=193, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/cec0657ecb7746788308aae37b2f4854 2024-12-15T04:39:53,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/a6315ae372df448aa202171e2102951f is 50, key is test_row_0/B:col10/1734237592306/Put/seqid=0 2024-12-15T04:39:53,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35109 is added to blk_1073742221_1397 (size=12151) 2024-12-15T04:39:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T04:39:53,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237653294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:53,516 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/009a982a253c496ba87a9e2bf9207c79 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/009a982a253c496ba87a9e2bf9207c79 2024-12-15T04:39:53,519 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/A of bfdbd4565c936c59a93e348f03cec823 into 009a982a253c496ba87a9e2bf9207c79(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:53,519 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:53,519 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/A, priority=13, startTime=1734237592686; duration=0sec 2024-12-15T04:39:53,519 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:53,519 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:A 2024-12-15T04:39:53,533 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/fd183f7357ad4941a43f20be330ee6e7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fd183f7357ad4941a43f20be330ee6e7 2024-12-15T04:39:53,536 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/C of bfdbd4565c936c59a93e348f03cec823 into fd183f7357ad4941a43f20be330ee6e7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:53,536 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:53,536 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/C, priority=13, startTime=1734237592686; duration=0sec 2024-12-15T04:39:53,536 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:53,536 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:C 2024-12-15T04:39:53,599 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:53,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237653597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:53,628 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:53,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237653626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:53,628 DEBUG [Thread-1568 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:53,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:53,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237653633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:53,635 DEBUG [Thread-1572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:53,640 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/a6315ae372df448aa202171e2102951f 2024-12-15T04:39:53,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:53,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237653640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:53,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/6bebf8093d124ee69b1741d24e009587 is 50, key is test_row_0/C:col10/1734237592306/Put/seqid=0 2024-12-15T04:39:53,646 DEBUG [Thread-1570 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at 
org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:53,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742222_1398 (size=12151) 2024-12-15T04:39:53,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-15T04:39:54,051 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=193 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/6bebf8093d124ee69b1741d24e009587 2024-12-15T04:39:54,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/cec0657ecb7746788308aae37b2f4854 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/cec0657ecb7746788308aae37b2f4854 2024-12-15T04:39:54,058 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/cec0657ecb7746788308aae37b2f4854, entries=150, sequenceid=193, filesize=30.4 K 2024-12-15T04:39:54,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/a6315ae372df448aa202171e2102951f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a6315ae372df448aa202171e2102951f 2024-12-15T04:39:54,061 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a6315ae372df448aa202171e2102951f, 
entries=150, sequenceid=193, filesize=11.9 K 2024-12-15T04:39:54,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/6bebf8093d124ee69b1741d24e009587 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/6bebf8093d124ee69b1741d24e009587 2024-12-15T04:39:54,065 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/6bebf8093d124ee69b1741d24e009587, entries=150, sequenceid=193, filesize=11.9 K 2024-12-15T04:39:54,066 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for bfdbd4565c936c59a93e348f03cec823 in 1259ms, sequenceid=193, compaction requested=false 2024-12-15T04:39:54,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:54,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:54,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-12-15T04:39:54,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-12-15T04:39:54,067 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-12-15T04:39:54,067 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4210 sec 2024-12-15T04:39:54,068 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.4230 sec 2024-12-15T04:39:54,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:54,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:39:54,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:54,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:54,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:54,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:54,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:54,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:54,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412150a1235a89b6f42b5947965ca8c1ec837_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237594104/Put/seqid=0 2024-12-15T04:39:54,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742223_1399 (size=14794) 2024-12-15T04:39:54,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:54,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237654183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:54,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:54,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237654289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:54,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:54,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50038 deadline: 1734237654372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:54,376 DEBUG [Thread-1566 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:54,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237654492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:54,514 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:54,517 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412150a1235a89b6f42b5947965ca8c1ec837_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412150a1235a89b6f42b5947965ca8c1ec837_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:54,517 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/2d3e4700729c4c559d679c606aef3fbb, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:54,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/2d3e4700729c4c559d679c606aef3fbb is 175, key is test_row_0/A:col10/1734237594104/Put/seqid=0 2024-12-15T04:39:54,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742224_1400 (size=39749) 2024-12-15T04:39:54,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=110 2024-12-15T04:39:54,749 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-12-15T04:39:54,750 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:54,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-12-15T04:39:54,751 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:54,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-15T04:39:54,751 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:54,751 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:54,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:54,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237654797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:54,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-15T04:39:54,902 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:54,903 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-15T04:39:54,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:54,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:54,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:54,903 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:54,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:54,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:54,921 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/2d3e4700729c4c559d679c606aef3fbb 2024-12-15T04:39:54,932 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/214c9af8ec064ca998f89751a52a5568 is 50, key is test_row_0/B:col10/1734237594104/Put/seqid=0 2024-12-15T04:39:54,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742225_1401 (size=12151) 2024-12-15T04:39:55,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-15T04:39:55,055 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:55,055 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-15T04:39:55,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:55,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:55,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-15T04:39:55,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:55,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:55,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237655303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:55,336 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/214c9af8ec064ca998f89751a52a5568 2024-12-15T04:39:55,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/3518ce0289b54783bb88d98273524fc0 is 50, key is test_row_0/C:col10/1734237594104/Put/seqid=0 2024-12-15T04:39:55,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742226_1402 (size=12151) 2024-12-15T04:39:55,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-15T04:39:55,359 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:55,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-15T04:39:55,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
as already flushing 2024-12-15T04:39:55,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,511 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:55,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-15T04:39:55,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:55,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,664 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:55,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-15T04:39:55,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:55,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:55,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/3518ce0289b54783bb88d98273524fc0 2024-12-15T04:39:55,748 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/2d3e4700729c4c559d679c606aef3fbb as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/2d3e4700729c4c559d679c606aef3fbb 2024-12-15T04:39:55,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/2d3e4700729c4c559d679c606aef3fbb, entries=200, sequenceid=211, filesize=38.8 K 2024-12-15T04:39:55,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/214c9af8ec064ca998f89751a52a5568 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/214c9af8ec064ca998f89751a52a5568 2024-12-15T04:39:55,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/214c9af8ec064ca998f89751a52a5568, entries=150, 
sequenceid=211, filesize=11.9 K 2024-12-15T04:39:55,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/3518ce0289b54783bb88d98273524fc0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/3518ce0289b54783bb88d98273524fc0 2024-12-15T04:39:55,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/3518ce0289b54783bb88d98273524fc0, entries=150, sequenceid=211, filesize=11.9 K 2024-12-15T04:39:55,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for bfdbd4565c936c59a93e348f03cec823 in 1653ms, sequenceid=211, compaction requested=true 2024-12-15T04:39:55,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:55,759 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:55,759 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:55,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:55,759 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102335 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:55,759 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:55,759 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] 
regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/B is initiating minor compaction (all files) 2024-12-15T04:39:55,759 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/A is initiating minor compaction (all files) 2024-12-15T04:39:55,759 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/A in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,759 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/B in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,759 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/009a982a253c496ba87a9e2bf9207c79, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/cec0657ecb7746788308aae37b2f4854, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/2d3e4700729c4c559d679c606aef3fbb] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=99.9 K 2024-12-15T04:39:55,759 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/f6f8b6a55f3d499ab25814c332d82cac, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a6315ae372df448aa202171e2102951f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/214c9af8ec064ca998f89751a52a5568] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=36.0 K 2024-12-15T04:39:55,760 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,760 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/009a982a253c496ba87a9e2bf9207c79, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/cec0657ecb7746788308aae37b2f4854, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/2d3e4700729c4c559d679c606aef3fbb] 2024-12-15T04:39:55,760 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting f6f8b6a55f3d499ab25814c332d82cac, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237591096 2024-12-15T04:39:55,760 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 009a982a253c496ba87a9e2bf9207c79, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237591096 2024-12-15T04:39:55,760 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting a6315ae372df448aa202171e2102951f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734237592306 2024-12-15T04:39:55,760 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting cec0657ecb7746788308aae37b2f4854, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734237592306 2024-12-15T04:39:55,760 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 214c9af8ec064ca998f89751a52a5568, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237592957 2024-12-15T04:39:55,760 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d3e4700729c4c559d679c606aef3fbb, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237592957 2024-12-15T04:39:55,766 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:55,767 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241215cb38d1f8f59244edabfcdc8a0877525b_bfdbd4565c936c59a93e348f03cec823 store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:55,769 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241215cb38d1f8f59244edabfcdc8a0877525b_bfdbd4565c936c59a93e348f03cec823, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:55,769 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215cb38d1f8f59244edabfcdc8a0877525b_bfdbd4565c936c59a93e348f03cec823 because there are no MOB cells, store=[table=TestAcidGuarantees family=A 
region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:55,769 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#B#compaction#334 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:55,770 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/bffd89d62b7d40358c71a59f93ba7dbb is 50, key is test_row_0/B:col10/1734237594104/Put/seqid=0 2024-12-15T04:39:55,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742228_1404 (size=12629) 2024-12-15T04:39:55,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742227_1403 (size=4469) 2024-12-15T04:39:55,780 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/bffd89d62b7d40358c71a59f93ba7dbb as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/bffd89d62b7d40358c71a59f93ba7dbb 2024-12-15T04:39:55,783 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/B of bfdbd4565c936c59a93e348f03cec823 into bffd89d62b7d40358c71a59f93ba7dbb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
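The repeated pid=113 failures above follow one pattern: the regionserver's FlushRegionCallable finds the region already flushing, throws IOException ("Unable to complete flush"), and the master re-dispatches the procedure roughly every 150 ms until the in-progress flush finishes later in the log. A minimal Java sketch of that reject-and-retry idea; Region, isFlushing and the sleep interval are illustrative stand-ins, not HBase's actual internals:

    import java.io.IOException;

    class FlushRetryModel {
        // Hypothetical region handle; the real HRegion tracks this state internally.
        interface Region {
            boolean isFlushing();
            void flush() throws IOException;
        }

        // Regionserver side: refuse to start a second flush on the same region.
        static void flushRegionCallable(Region region) throws IOException {
            if (region.isFlushing()) {
                throw new IOException("Unable to complete flush: already flushing");
            }
            region.flush();
        }

        // Master side: keep re-dispatching the remote call until it succeeds.
        static void flushRegionProcedure(Region region) throws InterruptedException {
            while (true) {
                try {
                    flushRegionCallable(region);   // in reality a remote procedure RPC
                    return;                        // reported as "Remote procedure done"
                } catch (IOException retryable) {
                    Thread.sleep(150);             // roughly the gap between attempts above
                }
            }
        }
    }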
2024-12-15T04:39:55,783 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:55,783 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/B, priority=13, startTime=1734237595759; duration=0sec 2024-12-15T04:39:55,783 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:55,783 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:B 2024-12-15T04:39:55,783 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:55,784 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:55,784 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/C is initiating minor compaction (all files) 2024-12-15T04:39:55,784 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/C in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,784 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fd183f7357ad4941a43f20be330ee6e7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/6bebf8093d124ee69b1741d24e009587, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/3518ce0289b54783bb88d98273524fc0] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=36.0 K 2024-12-15T04:39:55,785 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fd183f7357ad4941a43f20be330ee6e7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1734237591096 2024-12-15T04:39:55,785 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bebf8093d124ee69b1741d24e009587, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=193, earliestPutTs=1734237592306 2024-12-15T04:39:55,785 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 3518ce0289b54783bb88d98273524fc0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237592957 2024-12-15T04:39:55,790 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
bfdbd4565c936c59a93e348f03cec823#C#compaction#335 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:55,790 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/fc81f8fb22fa4655bc46f49b1105da43 is 50, key is test_row_0/C:col10/1734237594104/Put/seqid=0 2024-12-15T04:39:55,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742229_1405 (size=12629) 2024-12-15T04:39:55,816 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:55,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-12-15T04:39:55,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:55,816 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-12-15T04:39:55,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:55,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:55,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:55,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:55,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:55,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:55,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215feb315607cd14d0f99fb0d4f1cc1baf5_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237594161/Put/seqid=0 2024-12-15T04:39:55,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742230_1406 (size=12304) 
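The flush started above writes the A-family cells through DefaultMobStoreFlusher into a file under mobdir/.tmp, which means column family A is MOB-enabled for this table. A sketch of how such a family could be declared with the HBase 2.x descriptor builders; the threshold value is illustrative, not the test's actual setting:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    class MobFamilyExample {
        static TableDescriptor descriptor() {
            ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)      // cells above the threshold go to mobdir/... files
                .setMobThreshold(100L)    // bytes; illustrative value only
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setColumnFamily(mobFamily)
                .build();
        }
    }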
2024-12-15T04:39:55,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-15T04:39:56,173 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#A#compaction#333 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:56,174 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/fe892e71442b435881356a2c748c752b is 175, key is test_row_0/A:col10/1734237594104/Put/seqid=0 2024-12-15T04:39:56,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742231_1407 (size=31583) 2024-12-15T04:39:56,197 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/fc81f8fb22fa4655bc46f49b1105da43 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fc81f8fb22fa4655bc46f49b1105da43 2024-12-15T04:39:56,200 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/C of bfdbd4565c936c59a93e348f03cec823 into fc81f8fb22fa4655bc46f49b1105da43(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
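The PressureAwareThroughputController lines above ("average throughput is ... total limit is 50.00 MB/second, slept 0 time(s)") come from compaction write throttling: the compactor sleeps whenever its cumulative write rate would exceed the configured limit. A generic rate-limiter model of that idea, not HBase's implementation:

    // Model only: sleep so that bytesWritten / elapsed never exceeds the limit.
    class ThroughputLimiterModel {
        private final double limitBytesPerSec;
        private final long startNanos = System.nanoTime();
        private long bytesWritten;

        ThroughputLimiterModel(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        void control(long newBytes) throws InterruptedException {
            bytesWritten += newBytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double earliestAllowedSec = bytesWritten / limitBytesPerSec;
            long sleepMs = (long) ((earliestAllowedSec - elapsedSec) * 1000);
            if (sleepMs > 0) {
                Thread.sleep(sleepMs);  // throttled: writing faster than the cap allows
            }
        }
    }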
2024-12-15T04:39:56,200 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:56,200 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/C, priority=13, startTime=1734237595759; duration=0sec 2024-12-15T04:39:56,200 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:56,200 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:C 2024-12-15T04:39:56,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:56,227 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215feb315607cd14d0f99fb0d4f1cc1baf5_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215feb315607cd14d0f99fb0d4f1cc1baf5_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:56,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/c162f640792b4fd79f40bd4380376e9c, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:56,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/c162f640792b4fd79f40bd4380376e9c is 175, key is test_row_0/A:col10/1734237594161/Put/seqid=0 2024-12-15T04:39:56,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742232_1408 (size=31105) 2024-12-15T04:39:56,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:56,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:56,361 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:56,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237656355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:56,464 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:56,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237656462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:56,580 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/fe892e71442b435881356a2c748c752b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/fe892e71442b435881356a2c748c752b 2024-12-15T04:39:56,584 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/A of bfdbd4565c936c59a93e348f03cec823 into fe892e71442b435881356a2c748c752b(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
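The RegionTooBusyException WARN entries above ("Over memstore limit=512.0 K") are the regionserver refusing new Mutate calls while the region's memstore is over its blocking limit; writes succeed again once the in-flight flush drains it. The HBase client normally retries such calls internally, but an explicit backoff loop would look roughly like the sketch below; whether RegionTooBusyException reaches the caller unwrapped depends on client retry settings, so treat that detail as an assumption:

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    class BusyRegionWriter {
        static void putWithBackoff() throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;
                while (true) {
                    try {
                        table.put(put);
                        return;
                    } catch (RegionTooBusyException busy) {  // assumes it surfaces unwrapped
                        Thread.sleep(backoffMs);             // let the flush drain the memstore
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
            }
        }
    }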
2024-12-15T04:39:56,584 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:56,584 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/A, priority=13, startTime=1734237595758; duration=0sec 2024-12-15T04:39:56,584 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:56,584 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:A 2024-12-15T04:39:56,632 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=233, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/c162f640792b4fd79f40bd4380376e9c 2024-12-15T04:39:56,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/fae37da85c254647b0755f6f25f93c5c is 50, key is test_row_0/B:col10/1734237594161/Put/seqid=0 2024-12-15T04:39:56,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742233_1409 (size=12151) 2024-12-15T04:39:56,641 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/fae37da85c254647b0755f6f25f93c5c 2024-12-15T04:39:56,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/7f5761d2872f4822ae8151f8d04b5bad is 50, key is test_row_0/C:col10/1734237594161/Put/seqid=0 2024-12-15T04:39:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742234_1410 (size=12151) 2024-12-15T04:39:56,649 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/7f5761d2872f4822ae8151f8d04b5bad 2024-12-15T04:39:56,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/c162f640792b4fd79f40bd4380376e9c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/c162f640792b4fd79f40bd4380376e9c 2024-12-15T04:39:56,654 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/c162f640792b4fd79f40bd4380376e9c, entries=150, sequenceid=233, filesize=30.4 K 2024-12-15T04:39:56,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/fae37da85c254647b0755f6f25f93c5c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fae37da85c254647b0755f6f25f93c5c 2024-12-15T04:39:56,657 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fae37da85c254647b0755f6f25f93c5c, entries=150, sequenceid=233, filesize=11.9 K 2024-12-15T04:39:56,658 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/7f5761d2872f4822ae8151f8d04b5bad as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/7f5761d2872f4822ae8151f8d04b5bad 2024-12-15T04:39:56,660 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/7f5761d2872f4822ae8151f8d04b5bad, entries=150, sequenceid=233, filesize=11.9 K 2024-12-15T04:39:56,661 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for bfdbd4565c936c59a93e348f03cec823 in 845ms, sequenceid=233, compaction requested=false 2024-12-15T04:39:56,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:56,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
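Each store file in the flush above is first written under the region's .tmp/ directory and only then committed by renaming it into the family directory (A/, B/, C/), so readers never observe a partially written HFile. A generic write-then-rename sketch of that pattern using local files (HBase performs the equivalent moves on HDFS):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    class TmpThenCommitSketch {
        static void commit(Path tmpDir, Path familyDir, String fileName, byte[] contents)
                throws IOException {
            Files.createDirectories(tmpDir);
            Files.createDirectories(familyDir);
            Path tmp = tmpDir.resolve(fileName);
            Files.write(tmp, contents);                   // staged file, invisible to readers
            Files.move(tmp, familyDir.resolve(fileName),
                       StandardCopyOption.ATOMIC_MOVE);   // becomes visible in one step
        }
    }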
2024-12-15T04:39:56,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-12-15T04:39:56,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-12-15T04:39:56,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-15T04:39:56,664 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9110 sec 2024-12-15T04:39:56,665 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.9140 sec 2024-12-15T04:39:56,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:56,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-12-15T04:39:56,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:56,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:56,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:56,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:56,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:56,669 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:56,673 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215d90d0ef585e74218b95dbaa8622dfe82_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237596354/Put/seqid=0 2024-12-15T04:39:56,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742235_1411 (size=14794) 2024-12-15T04:39:56,745 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:56,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237656742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:56,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:56,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237656846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:56,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-12-15T04:39:56,854 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-12-15T04:39:56,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:39:56,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-12-15T04:39:56,856 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:39:56,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-15T04:39:56,857 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:39:56,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:39:56,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-15T04:39:57,008 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,009 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:57,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
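The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" and "Operation: FLUSH ... procId: 112 completed" entries above correspond to the test driving table flushes through the admin API, which stores a FlushTableProcedure on the master and waits for it to finish. Roughly:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    class AdminFlushExample {
        static void flushTable() throws Exception {
            try (Connection conn = ConnectionFactory.createConnection();
                 Admin admin = conn.getAdmin()) {
                // Submits a flush procedure for the table and waits for completion.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }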
2024-12-15T04:39:57,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:57,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,009 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:57,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:57,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237657050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,077 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:57,079 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215d90d0ef585e74218b95dbaa8622dfe82_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215d90d0ef585e74218b95dbaa8622dfe82_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:57,080 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1d3fd9ad62af4d49a966d88d300632c3, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:57,081 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1d3fd9ad62af4d49a966d88d300632c3 is 175, key is test_row_0/A:col10/1734237596354/Put/seqid=0 2024-12-15T04:39:57,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742236_1412 (size=39749) 2024-12-15T04:39:57,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-15T04:39:57,160 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:57,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:57,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:57,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,161 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:57,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,313 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:57,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
as already flushing 2024-12-15T04:39:57,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:57,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237657354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-15T04:39:57,464 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:57,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:57,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:57,465 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,485 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=251, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1d3fd9ad62af4d49a966d88d300632c3 2024-12-15T04:39:57,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/311d5fd7855c4350843e857d44dcb18e is 50, key is test_row_0/B:col10/1734237596354/Put/seqid=0 2024-12-15T04:39:57,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742237_1413 (size=12151) 2024-12-15T04:39:57,616 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:57,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:57,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:57,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,665 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:57,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49986 deadline: 1734237657663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,665 DEBUG [Thread-1568 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:57,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50012 deadline: 1734237657666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:57,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:50000 deadline: 1734237657667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,669 DEBUG [Thread-1572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8177 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:57,669 DEBUG [Thread-1570 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:39:57,768 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:57,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:57,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237657861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,892 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/311d5fd7855c4350843e857d44dcb18e 2024-12-15T04:39:57,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/b79f023f3fe94a3c9390628429390492 is 50, key is test_row_0/C:col10/1734237596354/Put/seqid=0 2024-12-15T04:39:57,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742238_1414 (size=12151) 2024-12-15T04:39:57,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:57,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:57,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:57,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:57,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:57,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-15T04:39:58,072 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:58,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:58,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:58,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:39:58,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:58,224 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:58,224 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
as already flushing 2024-12-15T04:39:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:58,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:58,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:39:58,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/b79f023f3fe94a3c9390628429390492 2024-12-15T04:39:58,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1d3fd9ad62af4d49a966d88d300632c3 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1d3fd9ad62af4d49a966d88d300632c3 2024-12-15T04:39:58,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1d3fd9ad62af4d49a966d88d300632c3, entries=200, sequenceid=251, filesize=38.8 K 2024-12-15T04:39:58,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/311d5fd7855c4350843e857d44dcb18e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/311d5fd7855c4350843e857d44dcb18e 2024-12-15T04:39:58,311 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/311d5fd7855c4350843e857d44dcb18e, entries=150, sequenceid=251, filesize=11.9 K 2024-12-15T04:39:58,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/b79f023f3fe94a3c9390628429390492 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/b79f023f3fe94a3c9390628429390492 2024-12-15T04:39:58,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/b79f023f3fe94a3c9390628429390492, entries=150, sequenceid=251, filesize=11.9 K 2024-12-15T04:39:58,314 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for bfdbd4565c936c59a93e348f03cec823 in 1645ms, sequenceid=251, compaction requested=true 2024-12-15T04:39:58,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:58,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:39:58,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:58,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:39:58,315 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:58,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:58,315 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:58,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:39:58,315 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:58,315 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:58,315 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:58,316 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/A is initiating minor compaction (all files) 2024-12-15T04:39:58,316 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/B is initiating minor compaction (all files) 2024-12-15T04:39:58,316 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/A in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,316 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/B in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,316 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/fe892e71442b435881356a2c748c752b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/c162f640792b4fd79f40bd4380376e9c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1d3fd9ad62af4d49a966d88d300632c3] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=100.0 K 2024-12-15T04:39:58,316 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/bffd89d62b7d40358c71a59f93ba7dbb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fae37da85c254647b0755f6f25f93c5c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/311d5fd7855c4350843e857d44dcb18e] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=36.1 K 2024-12-15T04:39:58,316 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,316 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/fe892e71442b435881356a2c748c752b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/c162f640792b4fd79f40bd4380376e9c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1d3fd9ad62af4d49a966d88d300632c3] 2024-12-15T04:39:58,316 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting bffd89d62b7d40358c71a59f93ba7dbb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237592957 2024-12-15T04:39:58,316 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe892e71442b435881356a2c748c752b, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237592957 2024-12-15T04:39:58,316 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fae37da85c254647b0755f6f25f93c5c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734237594161 2024-12-15T04:39:58,316 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting c162f640792b4fd79f40bd4380376e9c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734237594161 2024-12-15T04:39:58,317 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 311d5fd7855c4350843e857d44dcb18e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237596350 2024-12-15T04:39:58,317 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d3fd9ad62af4d49a966d88d300632c3, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237596344 2024-12-15T04:39:58,322 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#B#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:58,322 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/e5a32c529e4c43729a7641a1902705a9 is 50, key is test_row_0/B:col10/1734237596354/Put/seqid=0 2024-12-15T04:39:58,323 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:58,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742239_1415 (size=12731) 2024-12-15T04:39:58,326 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241215cbffddd64ce74de29532529b081a3d3c_bfdbd4565c936c59a93e348f03cec823 store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:58,328 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241215cbffddd64ce74de29532529b081a3d3c_bfdbd4565c936c59a93e348f03cec823, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:58,328 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215cbffddd64ce74de29532529b081a3d3c_bfdbd4565c936c59a93e348f03cec823 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:58,334 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/e5a32c529e4c43729a7641a1902705a9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/e5a32c529e4c43729a7641a1902705a9 2024-12-15T04:39:58,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742240_1416 (size=4469) 2024-12-15T04:39:58,338 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/B of bfdbd4565c936c59a93e348f03cec823 into e5a32c529e4c43729a7641a1902705a9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:58,338 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:58,338 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/B, priority=13, startTime=1734237598315; duration=0sec 2024-12-15T04:39:58,338 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:39:58,338 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:B 2024-12-15T04:39:58,338 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:39:58,339 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:39:58,339 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/C is initiating minor compaction (all files) 2024-12-15T04:39:58,339 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/C in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,339 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fc81f8fb22fa4655bc46f49b1105da43, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/7f5761d2872f4822ae8151f8d04b5bad, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/b79f023f3fe94a3c9390628429390492] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=36.1 K 2024-12-15T04:39:58,339 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fc81f8fb22fa4655bc46f49b1105da43, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237592957 2024-12-15T04:39:58,340 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f5761d2872f4822ae8151f8d04b5bad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734237594161 2024-12-15T04:39:58,340 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting b79f023f3fe94a3c9390628429390492, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237596350 2024-12-15T04:39:58,345 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
bfdbd4565c936c59a93e348f03cec823#C#compaction#344 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:58,345 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/ed020037d56c4c2786162214a7c3b801 is 50, key is test_row_0/C:col10/1734237596354/Put/seqid=0 2024-12-15T04:39:58,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742241_1417 (size=12731) 2024-12-15T04:39:58,376 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:39:58,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-12-15T04:39:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:39:58,377 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-15T04:39:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:58,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:58,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:58,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:58,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:58,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412155e549c0ff707424bb65ed0ce8acf121d_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237596722/Put/seqid=0 2024-12-15T04:39:58,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742242_1418 (size=12454) 
2024-12-15T04:39:58,739 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#A#compaction#343 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:39:58,739 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1ee8b813c5834e99a84b5ac1c011df5b is 175, key is test_row_0/A:col10/1734237596354/Put/seqid=0 2024-12-15T04:39:58,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742243_1419 (size=31685) 2024-12-15T04:39:58,751 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/ed020037d56c4c2786162214a7c3b801 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ed020037d56c4c2786162214a7c3b801 2024-12-15T04:39:58,755 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/C of bfdbd4565c936c59a93e348f03cec823 into ed020037d56c4c2786162214a7c3b801(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:39:58,755 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:58,755 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/C, priority=13, startTime=1734237598315; duration=0sec 2024-12-15T04:39:58,755 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:58,755 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:C 2024-12-15T04:39:58,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:58,788 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412155e549c0ff707424bb65ed0ce8acf121d_bfdbd4565c936c59a93e348f03cec823 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155e549c0ff707424bb65ed0ce8acf121d_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:58,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/8681e7c4b6e04b03b278e1c80d851bc8, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:58,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/8681e7c4b6e04b03b278e1c80d851bc8 is 175, key is test_row_0/A:col10/1734237596722/Put/seqid=0 2024-12-15T04:39:58,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742244_1420 (size=31255) 2024-12-15T04:39:58,794 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/8681e7c4b6e04b03b278e1c80d851bc8 2024-12-15T04:39:58,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/1dbb62c0bf274fd386aba4ca9d60f8e2 is 50, key is test_row_0/B:col10/1734237596722/Put/seqid=0 2024-12-15T04:39:58,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742245_1421 (size=12301) 2024-12-15T04:39:58,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. as already flushing 2024-12-15T04:39:58,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:58,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:58,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237658924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:58,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-15T04:39:59,032 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:59,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237659030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:59,145 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1ee8b813c5834e99a84b5ac1c011df5b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ee8b813c5834e99a84b5ac1c011df5b 2024-12-15T04:39:59,148 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/A of bfdbd4565c936c59a93e348f03cec823 into 1ee8b813c5834e99a84b5ac1c011df5b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:39:59,148 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:59,148 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/A, priority=13, startTime=1734237598315; duration=0sec 2024-12-15T04:39:59,149 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:39:59,149 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:A 2024-12-15T04:39:59,205 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/1dbb62c0bf274fd386aba4ca9d60f8e2 2024-12-15T04:39:59,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/61f6df276e8a49ad9f7a30240276f4ba is 50, key is test_row_0/C:col10/1734237596722/Put/seqid=0 2024-12-15T04:39:59,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742246_1422 (size=12301) 2024-12-15T04:39:59,214 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/61f6df276e8a49ad9f7a30240276f4ba 2024-12-15T04:39:59,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/8681e7c4b6e04b03b278e1c80d851bc8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/8681e7c4b6e04b03b278e1c80d851bc8 2024-12-15T04:39:59,219 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/8681e7c4b6e04b03b278e1c80d851bc8, entries=150, sequenceid=272, filesize=30.5 K 2024-12-15T04:39:59,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/1dbb62c0bf274fd386aba4ca9d60f8e2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/1dbb62c0bf274fd386aba4ca9d60f8e2 2024-12-15T04:39:59,223 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/1dbb62c0bf274fd386aba4ca9d60f8e2, entries=150, sequenceid=272, filesize=12.0 K 2024-12-15T04:39:59,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/61f6df276e8a49ad9f7a30240276f4ba as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/61f6df276e8a49ad9f7a30240276f4ba 2024-12-15T04:39:59,227 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/61f6df276e8a49ad9f7a30240276f4ba, entries=150, sequenceid=272, filesize=12.0 K 2024-12-15T04:39:59,227 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for bfdbd4565c936c59a93e348f03cec823 in 850ms, sequenceid=272, compaction requested=false 2024-12-15T04:39:59,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:39:59,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:39:59,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-12-15T04:39:59,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-12-15T04:39:59,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-12-15T04:39:59,229 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3710 sec 2024-12-15T04:39:59,230 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.3740 sec 2024-12-15T04:39:59,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:59,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-15T04:39:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:39:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:39:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:39:59,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:39:59,241 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215fce9712e82744534975ce81ef09af6e3_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237598895/Put/seqid=0 2024-12-15T04:39:59,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742247_1423 (size=14994) 2024-12-15T04:39:59,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:59,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237659324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:59,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:59,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237659429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:59,634 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:59,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237659633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:39:59,654 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:39:59,657 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215fce9712e82744534975ce81ef09af6e3_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215fce9712e82744534975ce81ef09af6e3_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:39:59,657 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/702557a59e2548c2aff0216fb63b336d, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:39:59,658 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/702557a59e2548c2aff0216fb63b336d is 175, key is test_row_0/A:col10/1734237598895/Put/seqid=0 2024-12-15T04:39:59,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742248_1424 (size=39949) 2024-12-15T04:39:59,662 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/702557a59e2548c2aff0216fb63b336d 2024-12-15T04:39:59,668 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ce32bccd0f354b4d8f6d8bfd88f8c374 is 50, key is test_row_0/B:col10/1734237598895/Put/seqid=0 2024-12-15T04:39:59,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742249_1425 (size=12301) 2024-12-15T04:39:59,938 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:39:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237659936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:00,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ce32bccd0f354b4d8f6d8bfd88f8c374 2024-12-15T04:40:00,076 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/9b99cdada39848d49aeef29404402b2c is 50, key is test_row_0/C:col10/1734237598895/Put/seqid=0 2024-12-15T04:40:00,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742250_1426 (size=12301) 2024-12-15T04:40:00,123 DEBUG [Thread-1585 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3512017b to 127.0.0.1:55935 2024-12-15T04:40:00,123 DEBUG [Thread-1585 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:00,123 DEBUG [Thread-1577 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x798e7fd4 
to 127.0.0.1:55935 2024-12-15T04:40:00,123 DEBUG [Thread-1577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:00,126 DEBUG [Thread-1583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fa53591 to 127.0.0.1:55935 2024-12-15T04:40:00,126 DEBUG [Thread-1579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7284f16d to 127.0.0.1:55935 2024-12-15T04:40:00,126 DEBUG [Thread-1583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:00,126 DEBUG [Thread-1579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:00,127 DEBUG [Thread-1581 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37a637ac to 127.0.0.1:55935 2024-12-15T04:40:00,127 DEBUG [Thread-1581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:00,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:00,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:49988 deadline: 1734237660440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:00,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/9b99cdada39848d49aeef29404402b2c 2024-12-15T04:40:00,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/702557a59e2548c2aff0216fb63b336d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/702557a59e2548c2aff0216fb63b336d 
2024-12-15T04:40:00,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/702557a59e2548c2aff0216fb63b336d, entries=200, sequenceid=291, filesize=39.0 K 2024-12-15T04:40:00,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ce32bccd0f354b4d8f6d8bfd88f8c374 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ce32bccd0f354b4d8f6d8bfd88f8c374 2024-12-15T04:40:00,500 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ce32bccd0f354b4d8f6d8bfd88f8c374, entries=150, sequenceid=291, filesize=12.0 K 2024-12-15T04:40:00,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/9b99cdada39848d49aeef29404402b2c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/9b99cdada39848d49aeef29404402b2c 2024-12-15T04:40:00,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/9b99cdada39848d49aeef29404402b2c, entries=150, sequenceid=291, filesize=12.0 K 2024-12-15T04:40:00,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for bfdbd4565c936c59a93e348f03cec823 in 1269ms, sequenceid=291, compaction requested=true 2024-12-15T04:40:00,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:40:00,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:00,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:00,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bfdbd4565c936c59a93e348f03cec823:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:00,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:00,504 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:00,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
bfdbd4565c936c59a93e348f03cec823:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:00,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:00,504 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:00,505 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102889 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:00,505 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:00,505 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/A is initiating minor compaction (all files) 2024-12-15T04:40:00,505 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/B is initiating minor compaction (all files) 2024-12-15T04:40:00,505 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/A in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:40:00,505 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/B in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:40:00,505 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ee8b813c5834e99a84b5ac1c011df5b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/8681e7c4b6e04b03b278e1c80d851bc8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/702557a59e2548c2aff0216fb63b336d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=100.5 K 2024-12-15T04:40:00,505 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/e5a32c529e4c43729a7641a1902705a9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/1dbb62c0bf274fd386aba4ca9d60f8e2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ce32bccd0f354b4d8f6d8bfd88f8c374] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=36.5 K 2024-12-15T04:40:00,505 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:40:00,505 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ee8b813c5834e99a84b5ac1c011df5b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/8681e7c4b6e04b03b278e1c80d851bc8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/702557a59e2548c2aff0216fb63b336d] 2024-12-15T04:40:00,505 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting e5a32c529e4c43729a7641a1902705a9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237596350 2024-12-15T04:40:00,505 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1ee8b813c5834e99a84b5ac1c011df5b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237596350 2024-12-15T04:40:00,506 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dbb62c0bf274fd386aba4ca9d60f8e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1734237596722 2024-12-15T04:40:00,506 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8681e7c4b6e04b03b278e1c80d851bc8, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1734237596722 2024-12-15T04:40:00,506 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ce32bccd0f354b4d8f6d8bfd88f8c374, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734237598895 2024-12-15T04:40:00,506 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 702557a59e2548c2aff0216fb63b336d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734237598895 2024-12-15T04:40:00,510 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:40:00,510 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#B#compaction#351 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:00,511 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/b7ac45b93be749afabe20b7e16fde105 is 50, key is test_row_0/B:col10/1734237598895/Put/seqid=0 2024-12-15T04:40:00,511 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412157b24708cdae9441da4d1760ee53d2605_bfdbd4565c936c59a93e348f03cec823 store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:40:00,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742251_1427 (size=12983) 2024-12-15T04:40:00,514 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412157b24708cdae9441da4d1760ee53d2605_bfdbd4565c936c59a93e348f03cec823, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:40:00,514 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412157b24708cdae9441da4d1760ee53d2605_bfdbd4565c936c59a93e348f03cec823 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:40:00,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742252_1428 (size=4469) 2024-12-15T04:40:00,541 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T04:40:00,922 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#A#compaction#352 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:00,923 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/ba7d3acbbf5a4b12b4aea322388e38e8 is 175, key is test_row_0/A:col10/1734237598895/Put/seqid=0 2024-12-15T04:40:00,924 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/b7ac45b93be749afabe20b7e16fde105 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/b7ac45b93be749afabe20b7e16fde105 2024-12-15T04:40:00,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742253_1429 (size=31937) 2024-12-15T04:40:00,931 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/B of bfdbd4565c936c59a93e348f03cec823 into b7ac45b93be749afabe20b7e16fde105(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:00,931 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:40:00,931 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/B, priority=13, startTime=1734237600504; duration=0sec 2024-12-15T04:40:00,931 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:00,931 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:B 2024-12-15T04:40:00,931 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:00,932 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:00,932 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): bfdbd4565c936c59a93e348f03cec823/C is initiating minor compaction (all files) 2024-12-15T04:40:00,932 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bfdbd4565c936c59a93e348f03cec823/C in TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:40:00,932 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ed020037d56c4c2786162214a7c3b801, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/61f6df276e8a49ad9f7a30240276f4ba, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/9b99cdada39848d49aeef29404402b2c] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp, totalSize=36.5 K 2024-12-15T04:40:00,933 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ed020037d56c4c2786162214a7c3b801, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1734237596350 2024-12-15T04:40:00,933 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 61f6df276e8a49ad9f7a30240276f4ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=272, earliestPutTs=1734237596722 2024-12-15T04:40:00,934 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9b99cdada39848d49aeef29404402b2c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1734237598895 2024-12-15T04:40:00,941 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bfdbd4565c936c59a93e348f03cec823#C#compaction#353 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:00,942 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/db31345efa794ec18d5cb6918e107a98 is 50, key is test_row_0/C:col10/1734237598895/Put/seqid=0 2024-12-15T04:40:00,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742254_1430 (size=12983) 2024-12-15T04:40:00,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-12-15T04:40:00,961 INFO [Thread-1576 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-12-15T04:40:01,340 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/ba7d3acbbf5a4b12b4aea322388e38e8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/ba7d3acbbf5a4b12b4aea322388e38e8 2024-12-15T04:40:01,346 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/A of bfdbd4565c936c59a93e348f03cec823 into ba7d3acbbf5a4b12b4aea322388e38e8(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:01,346 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:40:01,346 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/A, priority=13, startTime=1734237600504; duration=0sec 2024-12-15T04:40:01,346 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:01,346 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:A 2024-12-15T04:40:01,350 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/db31345efa794ec18d5cb6918e107a98 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/db31345efa794ec18d5cb6918e107a98 2024-12-15T04:40:01,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bfdbd4565c936c59a93e348f03cec823/C of bfdbd4565c936c59a93e348f03cec823 into db31345efa794ec18d5cb6918e107a98(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:01,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:40:01,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823., storeName=bfdbd4565c936c59a93e348f03cec823/C, priority=13, startTime=1734237600504; duration=0sec 2024-12-15T04:40:01,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:01,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bfdbd4565c936c59a93e348f03cec823:C 2024-12-15T04:40:01,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:01,447 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-15T04:40:01,447 DEBUG [Thread-1574 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584e9ce to 127.0.0.1:55935 2024-12-15T04:40:01,447 DEBUG [Thread-1574 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:01,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:40:01,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:01,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:40:01,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:01,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:40:01,448 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:01,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215979eea398d54407699342164459ba086_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237599318/Put/seqid=0 2024-12-15T04:40:01,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742255_1431 (size=12454) 2024-12-15T04:40:01,865 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:01,874 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215979eea398d54407699342164459ba086_bfdbd4565c936c59a93e348f03cec823 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215979eea398d54407699342164459ba086_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:01,875 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/6ffb8a62d70f46479dd4e35fbdfd7689, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:40:01,875 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/6ffb8a62d70f46479dd4e35fbdfd7689 is 175, key is test_row_0/A:col10/1734237599318/Put/seqid=0 2024-12-15T04:40:01,879 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742256_1432 (size=31255) 2024-12-15T04:40:02,280 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/6ffb8a62d70f46479dd4e35fbdfd7689 2024-12-15T04:40:02,296 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/d1dc267d31964c118dbe106b8715b173 is 50, key is test_row_0/B:col10/1734237599318/Put/seqid=0 2024-12-15T04:40:02,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742257_1433 (size=12301) 2024-12-15T04:40:02,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/d1dc267d31964c118dbe106b8715b173 2024-12-15T04:40:02,712 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/c6aa968d31ea4d5182f330aa45e6bebf is 50, key is test_row_0/C:col10/1734237599318/Put/seqid=0 2024-12-15T04:40:02,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742258_1434 (size=12301) 2024-12-15T04:40:03,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/c6aa968d31ea4d5182f330aa45e6bebf 2024-12-15T04:40:03,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/6ffb8a62d70f46479dd4e35fbdfd7689 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/6ffb8a62d70f46479dd4e35fbdfd7689 2024-12-15T04:40:03,124 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/6ffb8a62d70f46479dd4e35fbdfd7689, entries=150, sequenceid=314, filesize=30.5 K 2024-12-15T04:40:03,124 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/d1dc267d31964c118dbe106b8715b173 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d1dc267d31964c118dbe106b8715b173 2024-12-15T04:40:03,127 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d1dc267d31964c118dbe106b8715b173, entries=150, sequenceid=314, filesize=12.0 K 2024-12-15T04:40:03,128 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/c6aa968d31ea4d5182f330aa45e6bebf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/c6aa968d31ea4d5182f330aa45e6bebf 2024-12-15T04:40:03,130 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/c6aa968d31ea4d5182f330aa45e6bebf, entries=150, sequenceid=314, filesize=12.0 K 2024-12-15T04:40:03,131 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for bfdbd4565c936c59a93e348f03cec823 in 1684ms, sequenceid=314, compaction requested=false 2024-12-15T04:40:03,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:40:04,436 DEBUG [Thread-1566 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cd96549 to 127.0.0.1:55935 2024-12-15T04:40:04,436 DEBUG [Thread-1566 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:05,039 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/e56de37b85b3:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/83a74427aa674e86820702b65b739371, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/9d45a4e3d19d40b1948e0a657cc7a27d, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/728e51d01762409bb33a8d1b60838c59, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/04ddfcb7333a4d848a300059cbfbe18c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/47ef864e674a43da80a6af4b16d531ad, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a73dd3c3f97f457b9360d88995db9f21, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a69b344ccb174592a99de97997b96778, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/eee28abbfc8343eeac4b461067727d01, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/25fa1f025ee5406a94835b054f8f9c92, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/b4c67c73958446a28551d92fd7d1fa39, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ada5a77ee244604a548d664932f9874, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/009a982a253c496ba87a9e2bf9207c79, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/cec0657ecb7746788308aae37b2f4854, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/2d3e4700729c4c559d679c606aef3fbb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/fe892e71442b435881356a2c748c752b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/c162f640792b4fd79f40bd4380376e9c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1d3fd9ad62af4d49a966d88d300632c3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ee8b813c5834e99a84b5ac1c011df5b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/8681e7c4b6e04b03b278e1c80d851bc8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/702557a59e2548c2aff0216fb63b336d] to archive 2024-12-15T04:40:05,041 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/e56de37b85b3:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:40:05,043 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/728e51d01762409bb33a8d1b60838c59 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/728e51d01762409bb33a8d1b60838c59 2024-12-15T04:40:05,043 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/9d45a4e3d19d40b1948e0a657cc7a27d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/9d45a4e3d19d40b1948e0a657cc7a27d 2024-12-15T04:40:05,043 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/47ef864e674a43da80a6af4b16d531ad to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/47ef864e674a43da80a6af4b16d531ad 2024-12-15T04:40:05,044 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a69b344ccb174592a99de97997b96778 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a69b344ccb174592a99de97997b96778 2024-12-15T04:40:05,044 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/83a74427aa674e86820702b65b739371 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/83a74427aa674e86820702b65b739371 2024-12-15T04:40:05,044 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a73dd3c3f97f457b9360d88995db9f21 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/a73dd3c3f97f457b9360d88995db9f21 2024-12-15T04:40:05,044 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/04ddfcb7333a4d848a300059cbfbe18c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/04ddfcb7333a4d848a300059cbfbe18c 2024-12-15T04:40:05,044 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/eee28abbfc8343eeac4b461067727d01 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/eee28abbfc8343eeac4b461067727d01 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/25fa1f025ee5406a94835b054f8f9c92 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/25fa1f025ee5406a94835b054f8f9c92 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ada5a77ee244604a548d664932f9874 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ada5a77ee244604a548d664932f9874 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/009a982a253c496ba87a9e2bf9207c79 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/009a982a253c496ba87a9e2bf9207c79 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/b4c67c73958446a28551d92fd7d1fa39 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/b4c67c73958446a28551d92fd7d1fa39 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/cec0657ecb7746788308aae37b2f4854 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/cec0657ecb7746788308aae37b2f4854 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/2d3e4700729c4c559d679c606aef3fbb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/2d3e4700729c4c559d679c606aef3fbb 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/c162f640792b4fd79f40bd4380376e9c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/c162f640792b4fd79f40bd4380376e9c 2024-12-15T04:40:05,045 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/fe892e71442b435881356a2c748c752b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/fe892e71442b435881356a2c748c752b 2024-12-15T04:40:05,046 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1d3fd9ad62af4d49a966d88d300632c3 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1d3fd9ad62af4d49a966d88d300632c3 2024-12-15T04:40:05,046 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ee8b813c5834e99a84b5ac1c011df5b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1ee8b813c5834e99a84b5ac1c011df5b 2024-12-15T04:40:05,046 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/8681e7c4b6e04b03b278e1c80d851bc8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/8681e7c4b6e04b03b278e1c80d851bc8 2024-12-15T04:40:05,046 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/702557a59e2548c2aff0216fb63b336d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/702557a59e2548c2aff0216fb63b336d 2024-12-15T04:40:05,048 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/e56de37b85b3:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6766e637d05348ea87a69a632f3a2005, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/c486a5e6ca144e26b2538df152438de2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fa54b6069f344f91bd8c6074cf0e61d9, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d9e7a86df6224a179fccfa04ab0f7d46, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/978b810691d44806a05b0fd777ee6426, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/dfaf235bb01b4d5a9d1d8996e407f202, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ae78262793c74039945c1f1121f8cf27, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6b78fc08c6474f73b02c0b9623e48932, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/9e1ef6ced12d47c0962ebc76ca2536ac, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ff5899d263694c2d92bf21189189460d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/f6f8b6a55f3d499ab25814c332d82cac, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a199df69913c49e7b725f4b7551ffcfc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a6315ae372df448aa202171e2102951f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/bffd89d62b7d40358c71a59f93ba7dbb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/214c9af8ec064ca998f89751a52a5568, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fae37da85c254647b0755f6f25f93c5c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/e5a32c529e4c43729a7641a1902705a9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/311d5fd7855c4350843e857d44dcb18e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/1dbb62c0bf274fd386aba4ca9d60f8e2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ce32bccd0f354b4d8f6d8bfd88f8c374] to archive 2024-12-15T04:40:05,049 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/e56de37b85b3:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:40:05,051 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fa54b6069f344f91bd8c6074cf0e61d9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fa54b6069f344f91bd8c6074cf0e61d9 2024-12-15T04:40:05,051 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/c486a5e6ca144e26b2538df152438de2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/c486a5e6ca144e26b2538df152438de2 2024-12-15T04:40:05,051 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6766e637d05348ea87a69a632f3a2005 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6766e637d05348ea87a69a632f3a2005 2024-12-15T04:40:05,051 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6b78fc08c6474f73b02c0b9623e48932 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/6b78fc08c6474f73b02c0b9623e48932 2024-12-15T04:40:05,051 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d9e7a86df6224a179fccfa04ab0f7d46 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d9e7a86df6224a179fccfa04ab0f7d46 2024-12-15T04:40:05,051 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/978b810691d44806a05b0fd777ee6426 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/978b810691d44806a05b0fd777ee6426 2024-12-15T04:40:05,051 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/dfaf235bb01b4d5a9d1d8996e407f202 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/dfaf235bb01b4d5a9d1d8996e407f202 2024-12-15T04:40:05,052 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ae78262793c74039945c1f1121f8cf27 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ae78262793c74039945c1f1121f8cf27 2024-12-15T04:40:05,052 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/9e1ef6ced12d47c0962ebc76ca2536ac to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/9e1ef6ced12d47c0962ebc76ca2536ac 2024-12-15T04:40:05,052 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a199df69913c49e7b725f4b7551ffcfc to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a199df69913c49e7b725f4b7551ffcfc 2024-12-15T04:40:05,052 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/214c9af8ec064ca998f89751a52a5568 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/214c9af8ec064ca998f89751a52a5568 2024-12-15T04:40:05,052 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/f6f8b6a55f3d499ab25814c332d82cac to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/f6f8b6a55f3d499ab25814c332d82cac 2024-12-15T04:40:05,052 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a6315ae372df448aa202171e2102951f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/a6315ae372df448aa202171e2102951f 2024-12-15T04:40:05,052 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/bffd89d62b7d40358c71a59f93ba7dbb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/bffd89d62b7d40358c71a59f93ba7dbb 2024-12-15T04:40:05,053 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ff5899d263694c2d92bf21189189460d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ff5899d263694c2d92bf21189189460d 2024-12-15T04:40:05,053 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fae37da85c254647b0755f6f25f93c5c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/fae37da85c254647b0755f6f25f93c5c 2024-12-15T04:40:05,053 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/e5a32c529e4c43729a7641a1902705a9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/e5a32c529e4c43729a7641a1902705a9 2024-12-15T04:40:05,053 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/311d5fd7855c4350843e857d44dcb18e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/311d5fd7855c4350843e857d44dcb18e 2024-12-15T04:40:05,053 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/1dbb62c0bf274fd386aba4ca9d60f8e2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/1dbb62c0bf274fd386aba4ca9d60f8e2 2024-12-15T04:40:05,053 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ce32bccd0f354b4d8f6d8bfd88f8c374 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ce32bccd0f354b4d8f6d8bfd88f8c374 2024-12-15T04:40:05,056 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/e56de37b85b3:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d288711b72144438a8cdc56ef6d95453, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ca94f680afce45be8e2a95665f71c55e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/150481eae44d4a32bf7432603d09e16d, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/316e7ef6343a4952adf02220b49bfdda, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/e873e71e9aac4a81bcb5e9d972936603, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5c642021f6a04d729ea2768472159143, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5e8c0708dd87490d98812f2d23721bd8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/582dcdf8f3f14262bb2e219f3eecb738, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d5c52fc31d5e4eb5a57f4ad72f9f793d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/283bcbb10d0b4012a98518bcec46b682, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fd183f7357ad4941a43f20be330ee6e7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/2bd17b9ff0734702be05ba6d19003cb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/6bebf8093d124ee69b1741d24e009587, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fc81f8fb22fa4655bc46f49b1105da43, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/3518ce0289b54783bb88d98273524fc0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/7f5761d2872f4822ae8151f8d04b5bad, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ed020037d56c4c2786162214a7c3b801, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/b79f023f3fe94a3c9390628429390492, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/61f6df276e8a49ad9f7a30240276f4ba, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/9b99cdada39848d49aeef29404402b2c] to archive 2024-12-15T04:40:05,056 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/e56de37b85b3:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:40:05,057 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d288711b72144438a8cdc56ef6d95453 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d288711b72144438a8cdc56ef6d95453 2024-12-15T04:40:05,057 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/316e7ef6343a4952adf02220b49bfdda to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/316e7ef6343a4952adf02220b49bfdda 2024-12-15T04:40:05,057 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/150481eae44d4a32bf7432603d09e16d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/150481eae44d4a32bf7432603d09e16d 2024-12-15T04:40:05,058 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ca94f680afce45be8e2a95665f71c55e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ca94f680afce45be8e2a95665f71c55e 2024-12-15T04:40:05,058 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/e873e71e9aac4a81bcb5e9d972936603 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/e873e71e9aac4a81bcb5e9d972936603 2024-12-15T04:40:05,058 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5c642021f6a04d729ea2768472159143 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5c642021f6a04d729ea2768472159143 2024-12-15T04:40:05,058 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/582dcdf8f3f14262bb2e219f3eecb738 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/582dcdf8f3f14262bb2e219f3eecb738 2024-12-15T04:40:05,058 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5e8c0708dd87490d98812f2d23721bd8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/5e8c0708dd87490d98812f2d23721bd8 2024-12-15T04:40:05,059 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d5c52fc31d5e4eb5a57f4ad72f9f793d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/d5c52fc31d5e4eb5a57f4ad72f9f793d 2024-12-15T04:40:05,059 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/283bcbb10d0b4012a98518bcec46b682 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/283bcbb10d0b4012a98518bcec46b682 2024-12-15T04:40:05,059 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/2bd17b9ff0734702be05ba6d19003cb5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/2bd17b9ff0734702be05ba6d19003cb5 2024-12-15T04:40:05,060 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fd183f7357ad4941a43f20be330ee6e7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fd183f7357ad4941a43f20be330ee6e7 2024-12-15T04:40:05,060 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fc81f8fb22fa4655bc46f49b1105da43 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/fc81f8fb22fa4655bc46f49b1105da43 2024-12-15T04:40:05,060 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/6bebf8093d124ee69b1741d24e009587 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/6bebf8093d124ee69b1741d24e009587 2024-12-15T04:40:05,060 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/3518ce0289b54783bb88d98273524fc0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/3518ce0289b54783bb88d98273524fc0 2024-12-15T04:40:05,060 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/7f5761d2872f4822ae8151f8d04b5bad to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/7f5761d2872f4822ae8151f8d04b5bad 2024-12-15T04:40:05,061 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ed020037d56c4c2786162214a7c3b801 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/ed020037d56c4c2786162214a7c3b801 2024-12-15T04:40:05,061 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/61f6df276e8a49ad9f7a30240276f4ba to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/61f6df276e8a49ad9f7a30240276f4ba 2024-12-15T04:40:05,061 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/b79f023f3fe94a3c9390628429390492 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/b79f023f3fe94a3c9390628429390492 2024-12-15T04:40:05,061 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/9b99cdada39848d49aeef29404402b2c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/9b99cdada39848d49aeef29404402b2c 2024-12-15T04:40:07,676 DEBUG [Thread-1572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2205f666 to 127.0.0.1:55935 2024-12-15T04:40:07,676 DEBUG [Thread-1572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:07,681 DEBUG [Thread-1570 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:55935 2024-12-15T04:40:07,681 DEBUG [Thread-1570 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:07,748 DEBUG [Thread-1568 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x31aea41b to 127.0.0.1:55935 2024-12-15T04:40:07,748 DEBUG [Thread-1568 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:07,749 INFO 
[Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 20 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 33 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 131 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3152 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9456 rows 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3121 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9363 rows 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3109 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9327 rows 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3142 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9426 rows 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3137 2024-12-15T04:40:07,749 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 9411 rows 2024-12-15T04:40:07,750 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T04:40:07,750 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x117e86d9 to 127.0.0.1:55935 2024-12-15T04:40:07,750 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:07,753 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-15T04:40:07,753 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-15T04:40:07,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:07,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-15T04:40:07,757 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237607757"}]},"ts":"1734237607757"} 2024-12-15T04:40:07,758 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-15T04:40:07,769 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-15T04:40:07,770 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure 
table=TestAcidGuarantees}] 2024-12-15T04:40:07,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, UNASSIGN}] 2024-12-15T04:40:07,772 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=118, ppid=117, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, UNASSIGN 2024-12-15T04:40:07,772 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:07,773 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:40:07,773 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; CloseRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:40:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-15T04:40:07,925 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:07,926 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:07,926 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:40:07,926 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing bfdbd4565c936c59a93e348f03cec823, disabling compactions & flushes 2024-12-15T04:40:07,926 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:40:07,926 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:40:07,927 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. after waiting 0 ms 2024-12-15T04:40:07,927 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 
2024-12-15T04:40:07,927 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing bfdbd4565c936c59a93e348f03cec823 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-15T04:40:07,928 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=A 2024-12-15T04:40:07,928 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:07,928 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=B 2024-12-15T04:40:07,928 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:07,928 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bfdbd4565c936c59a93e348f03cec823, store=C 2024-12-15T04:40:07,928 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:07,939 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121540f222f8459c4dd69d03448015e31985_bfdbd4565c936c59a93e348f03cec823 is 50, key is test_row_0/A:col10/1734237607672/Put/seqid=0 2024-12-15T04:40:07,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742259_1435 (size=12454) 2024-12-15T04:40:08,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-15T04:40:08,344 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:08,354 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121540f222f8459c4dd69d03448015e31985_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121540f222f8459c4dd69d03448015e31985_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:08,355 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1935bc6bed864967b85f03b8f34d8fac, store: [table=TestAcidGuarantees family=A region=bfdbd4565c936c59a93e348f03cec823] 2024-12-15T04:40:08,356 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1935bc6bed864967b85f03b8f34d8fac is 175, key is test_row_0/A:col10/1734237607672/Put/seqid=0 2024-12-15T04:40:08,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742260_1436 (size=31255) 2024-12-15T04:40:08,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-15T04:40:08,761 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=321, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1935bc6bed864967b85f03b8f34d8fac 2024-12-15T04:40:08,772 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ad00371d5e694914942cd2396ec8a7c5 is 50, key is test_row_0/B:col10/1734237607672/Put/seqid=0 2024-12-15T04:40:08,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742261_1437 (size=12301) 2024-12-15T04:40:08,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-15T04:40:09,178 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ad00371d5e694914942cd2396ec8a7c5 2024-12-15T04:40:09,190 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/71255f4c207e4dea910532e1212c6e37 is 50, key is test_row_0/C:col10/1734237607672/Put/seqid=0 2024-12-15T04:40:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742262_1438 (size=12301) 2024-12-15T04:40:09,596 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=321 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/71255f4c207e4dea910532e1212c6e37 2024-12-15T04:40:09,607 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/A/1935bc6bed864967b85f03b8f34d8fac as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1935bc6bed864967b85f03b8f34d8fac 2024-12-15T04:40:09,613 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1935bc6bed864967b85f03b8f34d8fac, entries=150, sequenceid=321, filesize=30.5 K 2024-12-15T04:40:09,614 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/B/ad00371d5e694914942cd2396ec8a7c5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ad00371d5e694914942cd2396ec8a7c5 2024-12-15T04:40:09,617 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ad00371d5e694914942cd2396ec8a7c5, entries=150, sequenceid=321, filesize=12.0 K 2024-12-15T04:40:09,617 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/.tmp/C/71255f4c207e4dea910532e1212c6e37 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/71255f4c207e4dea910532e1212c6e37 2024-12-15T04:40:09,620 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/71255f4c207e4dea910532e1212c6e37, entries=150, sequenceid=321, filesize=12.0 K 2024-12-15T04:40:09,621 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for bfdbd4565c936c59a93e348f03cec823 in 1694ms, sequenceid=321, compaction requested=true 2024-12-15T04:40:09,624 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/recovered.edits/324.seqid, newMaxSeqId=324, maxSeqId=4 2024-12-15T04:40:09,624 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823. 2024-12-15T04:40:09,624 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for bfdbd4565c936c59a93e348f03cec823: 2024-12-15T04:40:09,626 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,626 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=118 updating hbase:meta row=bfdbd4565c936c59a93e348f03cec823, regionState=CLOSED 2024-12-15T04:40:09,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-12-15T04:40:09,628 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; CloseRegionProcedure bfdbd4565c936c59a93e348f03cec823, server=e56de37b85b3,43199,1734237482035 in 1.8540 sec 2024-12-15T04:40:09,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117 2024-12-15T04:40:09,629 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bfdbd4565c936c59a93e348f03cec823, UNASSIGN in 1.8570 sec 2024-12-15T04:40:09,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-12-15T04:40:09,631 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8600 sec 2024-12-15T04:40:09,632 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237609632"}]},"ts":"1734237609632"} 2024-12-15T04:40:09,633 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-15T04:40:09,669 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-15T04:40:09,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9160 sec 2024-12-15T04:40:09,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-12-15T04:40:09,865 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-12-15T04:40:09,867 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-15T04:40:09,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:09,871 DEBUG [PEWorker-2 {}] 
procedure.DeleteTableProcedure(103): Waiting for RIT for pid=120, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:09,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-15T04:40:09,872 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=120, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:09,874 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,878 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/recovered.edits] 2024-12-15T04:40:09,882 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/6ffb8a62d70f46479dd4e35fbdfd7689 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/6ffb8a62d70f46479dd4e35fbdfd7689 2024-12-15T04:40:09,882 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1935bc6bed864967b85f03b8f34d8fac to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/1935bc6bed864967b85f03b8f34d8fac 2024-12-15T04:40:09,882 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/ba7d3acbbf5a4b12b4aea322388e38e8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/A/ba7d3acbbf5a4b12b4aea322388e38e8 2024-12-15T04:40:09,886 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ad00371d5e694914942cd2396ec8a7c5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/ad00371d5e694914942cd2396ec8a7c5 2024-12-15T04:40:09,886 DEBUG [HFileArchiver-15 
{}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/b7ac45b93be749afabe20b7e16fde105 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/b7ac45b93be749afabe20b7e16fde105 2024-12-15T04:40:09,886 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d1dc267d31964c118dbe106b8715b173 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/B/d1dc267d31964c118dbe106b8715b173 2024-12-15T04:40:09,890 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/db31345efa794ec18d5cb6918e107a98 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/db31345efa794ec18d5cb6918e107a98 2024-12-15T04:40:09,890 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/c6aa968d31ea4d5182f330aa45e6bebf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/c6aa968d31ea4d5182f330aa45e6bebf 2024-12-15T04:40:09,890 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/71255f4c207e4dea910532e1212c6e37 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/C/71255f4c207e4dea910532e1212c6e37 2024-12-15T04:40:09,894 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/recovered.edits/324.seqid to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823/recovered.edits/324.seqid 2024-12-15T04:40:09,895 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,895 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-15T04:40:09,895 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-15T04:40:09,896 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-15T04:40:09,901 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412150a1235a89b6f42b5947965ca8c1ec837_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412150a1235a89b6f42b5947965ca8c1ec837_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,901 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121513a02600cbc84d8f9b38042bb007f24b_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121513a02600cbc84d8f9b38042bb007f24b_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,901 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121525b86597306a4c65b13fc912b90c878f_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121525b86597306a4c65b13fc912b90c878f_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,901 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155654908c94f7463aac1260a7a260b5a3_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155654908c94f7463aac1260a7a260b5a3_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,901 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121551d90326e85a4b1082f39436b741560b_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121551d90326e85a4b1082f39436b741560b_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,901 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121542cefa6c0e1049a6b2db201b5ccb6098_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121542cefa6c0e1049a6b2db201b5ccb6098_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,901 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215249e1099f16a475193995d4f5bfa40a1_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215249e1099f16a475193995d4f5bfa40a1_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,902 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121540f222f8459c4dd69d03448015e31985_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121540f222f8459c4dd69d03448015e31985_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,902 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215979eea398d54407699342164459ba086_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215979eea398d54407699342164459ba086_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,903 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155e549c0ff707424bb65ed0ce8acf121d_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412155e549c0ff707424bb65ed0ce8acf121d_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,903 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412157ff6a5634970461f996e756ab125d7dc_bfdbd4565c936c59a93e348f03cec823 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412157ff6a5634970461f996e756ab125d7dc_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,903 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159802a0f7bc514a3b8f2d198da8c80b2f_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412159802a0f7bc514a3b8f2d198da8c80b2f_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,903 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215bcaf620b148d4ae485e2b4c88dd8ad9e_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215bcaf620b148d4ae485e2b4c88dd8ad9e_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,903 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215d90d0ef585e74218b95dbaa8622dfe82_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215d90d0ef585e74218b95dbaa8622dfe82_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,903 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e2c21c93da5e4644b3b948508b6ef162_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e2c21c93da5e4644b3b948508b6ef162_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,903 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215fce9712e82744534975ce81ef09af6e3_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215fce9712e82744534975ce81ef09af6e3_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,904 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215feb315607cd14d0f99fb0d4f1cc1baf5_bfdbd4565c936c59a93e348f03cec823 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215feb315607cd14d0f99fb0d4f1cc1baf5_bfdbd4565c936c59a93e348f03cec823 2024-12-15T04:40:09,904 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-15T04:40:09,906 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=120, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:09,907 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-15T04:40:09,908 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-15T04:40:09,909 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=120, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:09,909 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-15T04:40:09,909 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734237609909"}]},"ts":"9223372036854775807"} 2024-12-15T04:40:09,911 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-15T04:40:09,911 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => bfdbd4565c936c59a93e348f03cec823, NAME => 'TestAcidGuarantees,,1734237576795.bfdbd4565c936c59a93e348f03cec823.', STARTKEY => '', ENDKEY => ''}] 2024-12-15T04:40:09,911 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 
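The DeleteTableProcedure records in this stretch archive every store file (including the MOB files under mobdir) into the archive directory and then scrub the region and table state from hbase:meta. The client-side trigger for this teardown is a plain Admin delete; a hedged sketch, with a disable guard included because HBase only deletes disabled tables (connection setup again illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DeleteTableSketch {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          if (!admin.isTableDisabled(tn)) {
            admin.disableTable(tn);   // a table must be disabled before it can be deleted
          }
          // Server side this runs a DeleteTableProcedure: archive the HFiles and MOB files,
          // then remove the region rows and the table state from hbase:meta.
          admin.deleteTable(tn);
        }
      }
    }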
2024-12-15T04:40:09,911 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734237609911"}]},"ts":"9223372036854775807"} 2024-12-15T04:40:09,912 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-15T04:40:09,970 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=120, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:09,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 103 msec 2024-12-15T04:40:09,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-12-15T04:40:09,974 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-12-15T04:40:09,989 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobScanAtomicity Thread=244 (was 246), OpenFileDescriptor=446 (was 457), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=302 (was 322), ProcessCount=11 (was 11), AvailableMemoryMB=4484 (was 4497) 2024-12-15T04:40:09,997 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244, OpenFileDescriptor=446, MaxFileDescriptor=1048576, SystemLoadAverage=302, ProcessCount=11, AvailableMemoryMB=4484 2024-12-15T04:40:10,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
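The TableDescriptorChecker warning at 04:40:10,003 fires because hbase.hregion.memstore.flush.size is 131072 bytes (128 KB), far below the usual 128 MB default; a tiny flush size like this is presumably set by the test to force very frequent flushes. A sketch of the two places such a value can live, matching the "table descriptor or hbase.hregion.memstore.flush.size" wording of the warning; which mechanism the test actually uses is not visible in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushSizeSketch {
      public static void main(String[] args) {
        // Cluster-wide knob: a region flushes a memstore once it holds this many bytes.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 131072L); // 128 KB, the value the checker flags
        System.out.println(conf.get("hbase.hregion.memstore.flush.size"));

        // Per-table equivalent (MEMSTORE_FLUSHSIZE on the descriptor), as the warning text mentions.
        TableDescriptor desc =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                .setMemStoreFlushSize(131072L)
                .build();
        System.out.println(desc); // the descriptor would then be passed to Admin.createTable(...)
      }
    }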
2024-12-15T04:40:10,004 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:40:10,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:10,005 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T04:40:10,006 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:10,006 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 121 2024-12-15T04:40:10,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-15T04:40:10,006 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T04:40:10,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742263_1439 (size=963) 2024-12-15T04:40:10,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-15T04:40:10,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-15T04:40:10,417 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:40:10,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742264_1440 (size=53) 2024-12-15T04:40:10,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-15T04:40:10,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:40:10,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7c960dc144bb2e403b8a500d7a170ddd, disabling compactions & flushes 2024-12-15T04:40:10,832 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:10,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:10,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. after waiting 0 ms 2024-12-15T04:40:10,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:10,832 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
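The descriptor logged at 04:40:10,004 declares three column families A/B/C with VERSIONS => '1' and ROW bloom filters, plus the table-level metadata 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' that the StoreOpener lines later report as a CompactingMemStore with an ADAPTIVE compactor. A rough builder-API equivalent; connection setup is illustrative and every other attribute is left at its default.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAdaptiveTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table-level metadata seen in the log: ADAPTIVE in-memory compaction for all stores.
                .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
        for (String family : new String[] { "A", "B", "C" }) {
          builder.setColumnFamily(
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                  .setMaxVersions(1)                 // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .build());
        }
        TableDescriptor desc = builder.build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(desc); // server side: a CreateTableProcedure like pid=121 in this log
        }
      }
    }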
2024-12-15T04:40:10,832 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:10,835 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T04:40:10,835 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734237610835"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734237610835"}]},"ts":"1734237610835"} 2024-12-15T04:40:10,838 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T04:40:10,840 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T04:40:10,840 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237610840"}]},"ts":"1734237610840"} 2024-12-15T04:40:10,841 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-15T04:40:10,886 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7c960dc144bb2e403b8a500d7a170ddd, ASSIGN}] 2024-12-15T04:40:10,888 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7c960dc144bb2e403b8a500d7a170ddd, ASSIGN 2024-12-15T04:40:10,889 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7c960dc144bb2e403b8a500d7a170ddd, ASSIGN; state=OFFLINE, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=false 2024-12-15T04:40:11,040 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=7c960dc144bb2e403b8a500d7a170ddd, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:11,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; OpenRegionProcedure 7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:40:11,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-15T04:40:11,197 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:11,202 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:11,203 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7285): Opening region: {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:40:11,203 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,203 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:40:11,203 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7327): checking encryption for 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,203 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(7330): checking classloading for 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,205 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,207 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:11,207 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c960dc144bb2e403b8a500d7a170ddd columnFamilyName A 2024-12-15T04:40:11,207 DEBUG [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:11,208 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.HStore(327): Store=7c960dc144bb2e403b8a500d7a170ddd/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:11,208 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,208 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:11,209 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c960dc144bb2e403b8a500d7a170ddd columnFamilyName B 2024-12-15T04:40:11,209 DEBUG [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:11,209 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.HStore(327): Store=7c960dc144bb2e403b8a500d7a170ddd/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:11,209 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,210 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:11,210 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7c960dc144bb2e403b8a500d7a170ddd columnFamilyName C 2024-12-15T04:40:11,210 DEBUG [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:11,210 INFO [StoreOpener-7c960dc144bb2e403b8a500d7a170ddd-1 {}] regionserver.HStore(327): Store=7c960dc144bb2e403b8a500d7a170ddd/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:11,211 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:11,211 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,211 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,213 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:40:11,213 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1085): writing seq id for 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:11,215 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:40:11,215 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1102): Opened 7c960dc144bb2e403b8a500d7a170ddd; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66548786, jitterRate=-0.008345812559127808}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:40:11,216 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegion(1001): Region open journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:11,216 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., pid=123, masterSystemTime=1734237611196 2024-12-15T04:40:11,217 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:11,218 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=123}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
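With the region open (next sequenceid=2), the test's writers start mutating rows such as test_row_0 across families A/B/C (the same keys visible in the earlier flush output, e.g. test_row_0/A:col10), and the MemStoreFlusher entry a little further down shows the first flush those writes trigger. A hedged sketch of one such multi-family Put; the payload value is illustrative. Because HBase applies a Put to a single row atomically, readers should never observe only some of the three families updated, which is the property these TestAcidGuarantees scans check.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriterSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // One row mutation touching all three families; applied atomically within the row.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          byte[] value = Bytes.toBytes("val"); // illustrative payload
          for (String family : new String[] { "A", "B", "C" }) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
          }
          table.put(put);
        }
      }
    }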
2024-12-15T04:40:11,218 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=7c960dc144bb2e403b8a500d7a170ddd, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:11,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-12-15T04:40:11,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; OpenRegionProcedure 7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 in 177 msec 2024-12-15T04:40:11,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-12-15T04:40:11,222 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7c960dc144bb2e403b8a500d7a170ddd, ASSIGN in 334 msec 2024-12-15T04:40:11,222 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T04:40:11,222 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237611222"}]},"ts":"1734237611222"} 2024-12-15T04:40:11,223 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-15T04:40:11,263 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=121, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T04:40:11,265 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2590 sec 2024-12-15T04:40:12,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=121 2024-12-15T04:40:12,117 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 121 completed 2024-12-15T04:40:12,120 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7d0ab200 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@32bb71c 2024-12-15T04:40:12,163 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@de9f076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,166 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,169 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41274, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,171 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T04:40:12,172 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40602, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T04:40:12,174 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5871c039 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bc0f7c 2024-12-15T04:40:12,186 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4414259d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,187 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7daa5922 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1b8b6e04 2024-12-15T04:40:12,195 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ed69825, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,196 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b7f20c4 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc486e1 2024-12-15T04:40:12,203 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11193a0c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,204 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-12-15T04:40:12,212 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,212 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-12-15T04:40:12,220 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,220 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6dd48863 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8a917b 2024-12-15T04:40:12,228 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3652e74d, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,229 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51196534 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@54c2725 2024-12-15T04:40:12,237 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2405c04e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,237 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc5e114 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d49886 2024-12-15T04:40:12,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73d92042, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,246 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e96b8ad to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@635b1751 2024-12-15T04:40:12,253 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@593af048, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,254 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17e5a47d to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cbfd84f 2024-12-15T04:40:12,262 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2209c520, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:12,263 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees 2024-12-15T04:40:12,264 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:12,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T04:40:12,265 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=124, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:12,265 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:12,267 DEBUG [hconnection-0x43cc4d43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,268 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,269 DEBUG [hconnection-0x6fc3b276-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,270 DEBUG [hconnection-0x5a026a82-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,270 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41294, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,271 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41292, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:12,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:40:12,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:12,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:12,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:12,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:12,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:12,275 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:12,275 DEBUG [hconnection-0x536c23fb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,275 DEBUG [hconnection-0x17fbeb89-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,276 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41330, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,276 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41318, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,279 
DEBUG [hconnection-0x6c2f3258-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,279 DEBUG [hconnection-0x4c97fd38-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,280 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41348, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,280 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,284 DEBUG [hconnection-0x58a5ec77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237672283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237672284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237672284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,285 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237672286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,294 DEBUG [hconnection-0x63a2d58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,295 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,296 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237672296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,299 DEBUG [hconnection-0x72b45133-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:12,300 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41390, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:12,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d17a4380c41043cfb638f3c65db7a4f2 is 50, key is test_row_0/A:col10/1734237612273/Put/seqid=0 2024-12-15T04:40:12,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742265_1441 (size=12001) 2024-12-15T04:40:12,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T04:40:12,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237672385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,388 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237672385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237672386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237672386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237672397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,415 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,416 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:12,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:12,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T04:40:12,568 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,568 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:12,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:12,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,568 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,591 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237672589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237672589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237672590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,593 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237672591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237672601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,720 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:12,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:12,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,737 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d17a4380c41043cfb638f3c65db7a4f2 2024-12-15T04:40:12,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/76e3fe40a08944a3ab13255a8e45efb5 is 50, key is test_row_0/B:col10/1734237612273/Put/seqid=0 2024-12-15T04:40:12,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742266_1442 (size=12001) 2024-12-15T04:40:12,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T04:40:12,887 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,888 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:12,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
as already flushing 2024-12-15T04:40:12,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:12,888 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:12,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237672893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237672894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237672894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,897 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237672895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:12,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:12,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237672904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,039 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,040 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:13,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:13,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,162 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/76e3fe40a08944a3ab13255a8e45efb5 2024-12-15T04:40:13,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/7ae1dd13abc64b6eb472710f12c6a278 is 50, key is test_row_0/C:col10/1734237612273/Put/seqid=0 2024-12-15T04:40:13,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742267_1443 (size=12001) 2024-12-15T04:40:13,192 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,192 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:13,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
as already flushing 2024-12-15T04:40:13,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,344 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,344 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:13,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:13,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T04:40:13,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237673397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237673398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237673399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,400 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:13,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237673400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:13,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237673410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,496 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,497 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:13,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:13,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:13,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] handler.RSProcedureHandler(58): pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=125 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=125 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:13,582 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/7ae1dd13abc64b6eb472710f12c6a278 2024-12-15T04:40:13,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d17a4380c41043cfb638f3c65db7a4f2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d17a4380c41043cfb638f3c65db7a4f2 2024-12-15T04:40:13,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d17a4380c41043cfb638f3c65db7a4f2, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:40:13,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/76e3fe40a08944a3ab13255a8e45efb5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/76e3fe40a08944a3ab13255a8e45efb5 2024-12-15T04:40:13,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/76e3fe40a08944a3ab13255a8e45efb5, entries=150, sequenceid=14, 
filesize=11.7 K 2024-12-15T04:40:13,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/7ae1dd13abc64b6eb472710f12c6a278 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7ae1dd13abc64b6eb472710f12c6a278 2024-12-15T04:40:13,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7ae1dd13abc64b6eb472710f12c6a278, entries=150, sequenceid=14, filesize=11.7 K 2024-12-15T04:40:13,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 7c960dc144bb2e403b8a500d7a170ddd in 1321ms, sequenceid=14, compaction requested=false 2024-12-15T04:40:13,595 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-12-15T04:40:13,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:13,649 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:13,649 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=125 2024-12-15T04:40:13,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
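
[Annotation] The stretch of log above shows the two interacting behaviours in this test: the region's memstore hits its blocking limit (512.0 K here), so client Mutate calls are rejected with RegionTooBusyException, while the master keeps re-dispatching FlushRegionProcedure pid=125 and the region server answers "NOT flushing ... as already flushing" until the in-flight MemStoreFlusher flush finishes and its .tmp files are committed into the A/B/C store directories. The Java sketch below is only an illustrative client that exercises the same code paths; it is not the TestAcidGuarantees test itself. The table/family/qualifier names are taken from the log, but the specific memstore settings are an assumption about how a 512 K blocking limit could arise (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier), and the retry/backoff values are illustrative.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstorePressureSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test-style settings: a 128 KB flush size with a 4x block multiplier
        // would give the "Over memstore limit=512.0 K" seen in the log above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {
          for (int i = 0; i < 10_000; i++) {
            // Same row/family/qualifier shape as the HFile keys in the log
            // (e.g. "test_row_0/A:col10/...").
            Put put = new Put(Bytes.toBytes("test_row_" + (i % 10)));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value-" + i));
            try {
              table.put(put);
            } catch (IOException e) {
              // On the server this write pressure surfaces as RegionTooBusyException
              // ("Over memstore limit"). Request a flush and back off before retrying.
              admin.flush(tn);
              Thread.sleep(500);
            }
          }
        }
      }
    }

In this log, the client-requested flush is what appears as FlushTableProcedure (pid=124, then 126) with a FlushRegionProcedure subprocedure (pid=125, then 127); the repeated IOException "Unable to complete flush" entries are the region server declining the remote procedure while an earlier flush is still running, and the master simply retries until it succeeds, as at 04:40:14,088-14,093.
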
2024-12-15T04:40:13,649 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:40:13,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:13,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:13,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:13,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:13,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:13,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:13,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/5b7662b31d854001ad33f62d109c82b3 is 50, key is test_row_0/A:col10/1734237612283/Put/seqid=0 2024-12-15T04:40:13,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742268_1444 (size=12001) 2024-12-15T04:40:13,657 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/5b7662b31d854001ad33f62d109c82b3 2024-12-15T04:40:13,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/c979d87689d14c68a3dc740bdedeb2a0 is 50, key is test_row_0/B:col10/1734237612283/Put/seqid=0 2024-12-15T04:40:13,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742269_1445 (size=12001) 2024-12-15T04:40:14,065 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/c979d87689d14c68a3dc740bdedeb2a0 2024-12-15T04:40:14,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/56ccfc02976f4680a5c2548be3b2a50d is 50, key is test_row_0/C:col10/1734237612283/Put/seqid=0 2024-12-15T04:40:14,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742270_1446 (size=12001) 2024-12-15T04:40:14,073 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/56ccfc02976f4680a5c2548be3b2a50d 2024-12-15T04:40:14,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/5b7662b31d854001ad33f62d109c82b3 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5b7662b31d854001ad33f62d109c82b3 2024-12-15T04:40:14,079 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5b7662b31d854001ad33f62d109c82b3, entries=150, sequenceid=38, filesize=11.7 K 2024-12-15T04:40:14,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/c979d87689d14c68a3dc740bdedeb2a0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c979d87689d14c68a3dc740bdedeb2a0 2024-12-15T04:40:14,083 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c979d87689d14c68a3dc740bdedeb2a0, entries=150, sequenceid=38, filesize=11.7 K 2024-12-15T04:40:14,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/56ccfc02976f4680a5c2548be3b2a50d as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/56ccfc02976f4680a5c2548be3b2a50d 2024-12-15T04:40:14,088 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/56ccfc02976f4680a5c2548be3b2a50d, entries=150, sequenceid=38, filesize=11.7 K 2024-12-15T04:40:14,088 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 7c960dc144bb2e403b8a500d7a170ddd in 439ms, sequenceid=38, compaction requested=false 2024-12-15T04:40:14,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:14,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:14,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=125}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=125 2024-12-15T04:40:14,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=125 2024-12-15T04:40:14,091 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-15T04:40:14,091 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8250 sec 2024-12-15T04:40:14,093 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=124, table=TestAcidGuarantees in 1.8280 sec 2024-12-15T04:40:14,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-15T04:40:14,368 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-12-15T04:40:14,368 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:14,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees 2024-12-15T04:40:14,369 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:14,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-15T04:40:14,370 INFO [PEWorker-2 {}] 
procedure.FlushTableProcedure(91): pid=126, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:14,370 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:14,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:14,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:40:14,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:14,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:14,410 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:14,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:14,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:14,411 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:14,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/3e79ee560b6a492a80ded4345d90c6e4 is 50, key is test_row_0/A:col10/1734237614409/Put/seqid=0 2024-12-15T04:40:14,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742271_1447 (size=16681) 2024-12-15T04:40:14,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237674430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237674431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237674434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237674434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,440 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237674436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-15T04:40:14,521 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-15T04:40:14,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:14,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:14,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:14,522 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237674537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237674537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237674538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237674538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,542 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237674541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-15T04:40:14,673 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-15T04:40:14,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:14,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:14,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:14,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,745 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237674739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237674739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237674742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237674742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:14,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237674743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/3e79ee560b6a492a80ded4345d90c6e4 2024-12-15T04:40:14,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/49bc2cc2fdf64c07b12dc0c72d51bddc is 50, key is test_row_0/B:col10/1734237614409/Put/seqid=0 2024-12-15T04:40:14,825 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,826 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-15T04:40:14,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:14,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:14,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:14,826 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742272_1448 (size=12001) 2024-12-15T04:40:14,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-15T04:40:14,978 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:14,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-15T04:40:14,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:14,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:14,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:14,979 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:14,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:15,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237675046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237675047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,048 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237675047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237675048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237675048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,130 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-15T04:40:15,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:15,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:15,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:15,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] handler.RSProcedureHandler(58): pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:15,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=127 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:15,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=127 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:15,143 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-15T04:40:15,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/49bc2cc2fdf64c07b12dc0c72d51bddc 2024-12-15T04:40:15,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/00e943de5cb14851a9b0b1a33915b067 is 50, key is test_row_0/C:col10/1734237614409/Put/seqid=0 2024-12-15T04:40:15,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742273_1449 (size=12001) 2024-12-15T04:40:15,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/00e943de5cb14851a9b0b1a33915b067 2024-12-15T04:40:15,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/3e79ee560b6a492a80ded4345d90c6e4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/3e79ee560b6a492a80ded4345d90c6e4 2024-12-15T04:40:15,250 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/3e79ee560b6a492a80ded4345d90c6e4, entries=250, sequenceid=49, filesize=16.3 K 2024-12-15T04:40:15,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/49bc2cc2fdf64c07b12dc0c72d51bddc as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/49bc2cc2fdf64c07b12dc0c72d51bddc 2024-12-15T04:40:15,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/49bc2cc2fdf64c07b12dc0c72d51bddc, entries=150, sequenceid=49, filesize=11.7 K 2024-12-15T04:40:15,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/00e943de5cb14851a9b0b1a33915b067 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/00e943de5cb14851a9b0b1a33915b067 2024-12-15T04:40:15,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/00e943de5cb14851a9b0b1a33915b067, entries=150, sequenceid=49, filesize=11.7 K 2024-12-15T04:40:15,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7c960dc144bb2e403b8a500d7a170ddd in 848ms, sequenceid=49, compaction requested=true 2024-12-15T04:40:15,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:15,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:15,258 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:15,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:15,258 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:15,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:15,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-12-15T04:40:15,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:15,258 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:15,259 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:15,259 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:15,259 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/A is initiating minor compaction (all files) 2024-12-15T04:40:15,259 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/B is initiating minor compaction (all files) 2024-12-15T04:40:15,259 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/B in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:15,259 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/A in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:15,259 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/76e3fe40a08944a3ab13255a8e45efb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c979d87689d14c68a3dc740bdedeb2a0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/49bc2cc2fdf64c07b12dc0c72d51bddc] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.2 K 2024-12-15T04:40:15,259 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d17a4380c41043cfb638f3c65db7a4f2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5b7662b31d854001ad33f62d109c82b3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/3e79ee560b6a492a80ded4345d90c6e4] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=39.7 K 2024-12-15T04:40:15,260 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 76e3fe40a08944a3ab13255a8e45efb5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237612273 2024-12-15T04:40:15,260 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting d17a4380c41043cfb638f3c65db7a4f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237612273 2024-12-15T04:40:15,260 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c979d87689d14c68a3dc740bdedeb2a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734237612282 2024-12-15T04:40:15,260 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5b7662b31d854001ad33f62d109c82b3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734237612282 2024-12-15T04:40:15,260 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 49bc2cc2fdf64c07b12dc0c72d51bddc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734237614401 2024-12-15T04:40:15,260 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e79ee560b6a492a80ded4345d90c6e4, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734237614401 2024-12-15T04:40:15,265 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#A#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:15,265 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/924c26784a36415aa1ac9088b9635a03 is 50, key is test_row_0/A:col10/1734237614409/Put/seqid=0 2024-12-15T04:40:15,266 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#B#compaction#370 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:15,266 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/dac95dc0f7134971aa20277180caf5c2 is 50, key is test_row_0/B:col10/1734237614409/Put/seqid=0 2024-12-15T04:40:15,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742275_1451 (size=12104) 2024-12-15T04:40:15,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742274_1450 (size=12104) 2024-12-15T04:40:15,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=127 2024-12-15T04:40:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:15,283 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:40:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:15,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:15,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:15,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/b2bc4b2d7fec4253b460979562376da8 is 50, key is test_row_0/A:col10/1734237614430/Put/seqid=0 2024-12-15T04:40:15,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742276_1452 (size=12001) 2024-12-15T04:40:15,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-15T04:40:15,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:15,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:15,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237675555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237675557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237675557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237675558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,562 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237675558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,661 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237675659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237675663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,665 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237675663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,666 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237675663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,676 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/dac95dc0f7134971aa20277180caf5c2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/dac95dc0f7134971aa20277180caf5c2 2024-12-15T04:40:15,679 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/B of 7c960dc144bb2e403b8a500d7a170ddd into dac95dc0f7134971aa20277180caf5c2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:15,679 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:15,679 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/B, priority=13, startTime=1734237615258; duration=0sec 2024-12-15T04:40:15,679 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:15,679 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:B 2024-12-15T04:40:15,679 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:15,680 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:15,680 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/C is initiating minor compaction (all files) 2024-12-15T04:40:15,680 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/C in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:15,680 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7ae1dd13abc64b6eb472710f12c6a278, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/56ccfc02976f4680a5c2548be3b2a50d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/00e943de5cb14851a9b0b1a33915b067] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.2 K 2024-12-15T04:40:15,680 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7ae1dd13abc64b6eb472710f12c6a278, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1734237612273 2024-12-15T04:40:15,681 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 56ccfc02976f4680a5c2548be3b2a50d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1734237612282 2024-12-15T04:40:15,681 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 00e943de5cb14851a9b0b1a33915b067, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734237614401 2024-12-15T04:40:15,685 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7c960dc144bb2e403b8a500d7a170ddd#C#compaction#372 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:15,685 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/97c9a5a301534661b3991fa981d94ca8 is 50, key is test_row_0/C:col10/1734237614409/Put/seqid=0 2024-12-15T04:40:15,686 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/924c26784a36415aa1ac9088b9635a03 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/924c26784a36415aa1ac9088b9635a03 2024-12-15T04:40:15,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742277_1453 (size=12104) 2024-12-15T04:40:15,689 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/A of 7c960dc144bb2e403b8a500d7a170ddd into 924c26784a36415aa1ac9088b9635a03(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:15,689 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:15,689 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/A, priority=13, startTime=1734237615258; duration=0sec 2024-12-15T04:40:15,690 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:15,690 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:A 2024-12-15T04:40:15,690 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/b2bc4b2d7fec4253b460979562376da8 2024-12-15T04:40:15,692 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/97c9a5a301534661b3991fa981d94ca8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/97c9a5a301534661b3991fa981d94ca8 2024-12-15T04:40:15,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/471bbd0f685044ef942a8ce5aef5f6d4 is 50, key is test_row_0/B:col10/1734237614430/Put/seqid=0 2024-12-15T04:40:15,695 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/C of 7c960dc144bb2e403b8a500d7a170ddd into 97c9a5a301534661b3991fa981d94ca8(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:15,695 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:15,695 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/C, priority=13, startTime=1734237615258; duration=0sec 2024-12-15T04:40:15,695 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:15,696 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:C 2024-12-15T04:40:15,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742278_1454 (size=12001) 2024-12-15T04:40:15,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237675863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237675866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237675866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:15,869 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:15,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237675866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,098 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/471bbd0f685044ef942a8ce5aef5f6d4 2024-12-15T04:40:16,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/73d47b5b407e452399aee27cd3cd8081 is 50, key is test_row_0/C:col10/1734237614430/Put/seqid=0 2024-12-15T04:40:16,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742279_1455 (size=12001) 2024-12-15T04:40:16,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237676165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237676169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237676170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,174 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237676171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-15T04:40:16,508 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/73d47b5b407e452399aee27cd3cd8081 2024-12-15T04:40:16,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/b2bc4b2d7fec4253b460979562376da8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/b2bc4b2d7fec4253b460979562376da8 2024-12-15T04:40:16,514 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/b2bc4b2d7fec4253b460979562376da8, entries=150, sequenceid=74, filesize=11.7 K 2024-12-15T04:40:16,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/471bbd0f685044ef942a8ce5aef5f6d4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/471bbd0f685044ef942a8ce5aef5f6d4 2024-12-15T04:40:16,517 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/471bbd0f685044ef942a8ce5aef5f6d4, entries=150, sequenceid=74, filesize=11.7 K 2024-12-15T04:40:16,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=127}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/73d47b5b407e452399aee27cd3cd8081 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/73d47b5b407e452399aee27cd3cd8081 2024-12-15T04:40:16,520 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/73d47b5b407e452399aee27cd3cd8081, entries=150, sequenceid=74, filesize=11.7 K 2024-12-15T04:40:16,521 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7c960dc144bb2e403b8a500d7a170ddd in 1238ms, sequenceid=74, compaction requested=false 2024-12-15T04:40:16,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:16,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:16,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=127}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=127 2024-12-15T04:40:16,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=127 2024-12-15T04:40:16,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-15T04:40:16,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1520 sec 2024-12-15T04:40:16,523 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=126, table=TestAcidGuarantees in 2.1550 sec 2024-12-15T04:40:16,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:16,565 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:40:16,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:16,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:16,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:16,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:16,565 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:16,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:16,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/6690d1336cba4655a52ebafc61009cee is 50, key is test_row_0/A:col10/1734237616564/Put/seqid=0 2024-12-15T04:40:16,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742280_1456 (size=12001) 2024-12-15T04:40:16,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237676634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237676670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237676676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237676677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237676677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237676738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:16,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237676940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:16,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/6690d1336cba4655a52ebafc61009cee 2024-12-15T04:40:16,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6ab27b8f51cf469fb53b8d453c3d0fd1 is 50, key is test_row_0/B:col10/1734237616564/Put/seqid=0 2024-12-15T04:40:16,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742281_1457 (size=12001) 2024-12-15T04:40:17,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:17,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237677245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:17,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6ab27b8f51cf469fb53b8d453c3d0fd1 2024-12-15T04:40:17,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/bc6a8a700a954e6ba80e5cdbb15671b1 is 50, key is test_row_0/C:col10/1734237616564/Put/seqid=0 2024-12-15T04:40:17,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742282_1458 (size=12001) 2024-12-15T04:40:17,680 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:17,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237677679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:17,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:17,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237677680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:17,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:17,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237677682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:17,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:17,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237677688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:17,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:17,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237677750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:17,787 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/bc6a8a700a954e6ba80e5cdbb15671b1 2024-12-15T04:40:17,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/6690d1336cba4655a52ebafc61009cee as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/6690d1336cba4655a52ebafc61009cee 2024-12-15T04:40:17,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/6690d1336cba4655a52ebafc61009cee, entries=150, sequenceid=89, filesize=11.7 K 2024-12-15T04:40:17,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6ab27b8f51cf469fb53b8d453c3d0fd1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ab27b8f51cf469fb53b8d453c3d0fd1 2024-12-15T04:40:17,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ab27b8f51cf469fb53b8d453c3d0fd1, entries=150, sequenceid=89, filesize=11.7 K 2024-12-15T04:40:17,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/bc6a8a700a954e6ba80e5cdbb15671b1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/bc6a8a700a954e6ba80e5cdbb15671b1 2024-12-15T04:40:17,823 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/bc6a8a700a954e6ba80e5cdbb15671b1, entries=150, sequenceid=89, filesize=11.7 K 2024-12-15T04:40:17,823 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7c960dc144bb2e403b8a500d7a170ddd in 1258ms, sequenceid=89, compaction requested=true 2024-12-15T04:40:17,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:17,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:17,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:17,824 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:17,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:17,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:17,824 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:17,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:17,824 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:17,824 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:17,824 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:17,824 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/B is initiating minor compaction (all files) 2024-12-15T04:40:17,824 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/A is initiating minor compaction (all files) 2024-12-15T04:40:17,824 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/A in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:17,824 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/B in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:17,824 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/924c26784a36415aa1ac9088b9635a03, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/b2bc4b2d7fec4253b460979562376da8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/6690d1336cba4655a52ebafc61009cee] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.3 K 2024-12-15T04:40:17,824 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/dac95dc0f7134971aa20277180caf5c2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/471bbd0f685044ef942a8ce5aef5f6d4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ab27b8f51cf469fb53b8d453c3d0fd1] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.3 K 2024-12-15T04:40:17,825 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 924c26784a36415aa1ac9088b9635a03, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734237614401 2024-12-15T04:40:17,825 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting dac95dc0f7134971aa20277180caf5c2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734237614401 2024-12-15T04:40:17,825 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2bc4b2d7fec4253b460979562376da8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734237614430 2024-12-15T04:40:17,825 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 471bbd0f685044ef942a8ce5aef5f6d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734237614430 2024-12-15T04:40:17,825 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6690d1336cba4655a52ebafc61009cee, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734237615556 2024-12-15T04:40:17,825 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ab27b8f51cf469fb53b8d453c3d0fd1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734237615556 2024-12-15T04:40:17,830 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#A#compaction#378 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:17,830 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#B#compaction#379 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:17,830 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/9a6570a211a046beae853e9f048102a3 is 50, key is test_row_0/A:col10/1734237616564/Put/seqid=0 2024-12-15T04:40:17,830 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/96702e6038ea44bb9bec2f3b41919bf2 is 50, key is test_row_0/B:col10/1734237616564/Put/seqid=0 2024-12-15T04:40:17,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742283_1459 (size=12207) 2024-12-15T04:40:17,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742284_1460 (size=12207) 2024-12-15T04:40:17,844 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/9a6570a211a046beae853e9f048102a3 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/9a6570a211a046beae853e9f048102a3 2024-12-15T04:40:17,847 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/A of 7c960dc144bb2e403b8a500d7a170ddd into 9a6570a211a046beae853e9f048102a3(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:17,847 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:17,847 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/A, priority=13, startTime=1734237617824; duration=0sec 2024-12-15T04:40:17,847 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:17,847 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:A 2024-12-15T04:40:17,847 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:17,848 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:17,848 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/C is initiating minor compaction (all files) 2024-12-15T04:40:17,848 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/C in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:17,848 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/97c9a5a301534661b3991fa981d94ca8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/73d47b5b407e452399aee27cd3cd8081, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/bc6a8a700a954e6ba80e5cdbb15671b1] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.3 K 2024-12-15T04:40:17,848 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 97c9a5a301534661b3991fa981d94ca8, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1734237614401 2024-12-15T04:40:17,848 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73d47b5b407e452399aee27cd3cd8081, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1734237614430 2024-12-15T04:40:17,849 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc6a8a700a954e6ba80e5cdbb15671b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734237615556 2024-12-15T04:40:17,853 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#C#compaction#380 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:17,854 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/5e35eae8fea3424687ed34bea2afe83d is 50, key is test_row_0/C:col10/1734237616564/Put/seqid=0 2024-12-15T04:40:17,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742285_1461 (size=12207) 2024-12-15T04:40:18,245 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/96702e6038ea44bb9bec2f3b41919bf2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/96702e6038ea44bb9bec2f3b41919bf2 2024-12-15T04:40:18,248 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/B of 7c960dc144bb2e403b8a500d7a170ddd into 96702e6038ea44bb9bec2f3b41919bf2(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:18,248 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:18,248 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/B, priority=13, startTime=1734237617824; duration=0sec 2024-12-15T04:40:18,248 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:18,248 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:B 2024-12-15T04:40:18,265 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/5e35eae8fea3424687ed34bea2afe83d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5e35eae8fea3424687ed34bea2afe83d 2024-12-15T04:40:18,269 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/C of 7c960dc144bb2e403b8a500d7a170ddd into 5e35eae8fea3424687ed34bea2afe83d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:18,269 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:18,269 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/C, priority=13, startTime=1734237617824; duration=0sec 2024-12-15T04:40:18,269 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:18,269 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:C 2024-12-15T04:40:18,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-12-15T04:40:18,474 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-12-15T04:40:18,475 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:18,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-12-15T04:40:18,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-15T04:40:18,479 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:18,479 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:18,479 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:18,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-15T04:40:18,631 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:18,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-12-15T04:40:18,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:18,631 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:40:18,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:18,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:18,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:18,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:18,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:18,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:18,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d1cd6b3e895c441eaff999917ecf4802 is 50, key is test_row_0/A:col10/1734237616634/Put/seqid=0 2024-12-15T04:40:18,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742286_1462 (size=12001) 2024-12-15T04:40:18,762 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:18,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:18,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-15T04:40:18,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:18,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237678780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:18,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:18,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237678885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,039 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d1cd6b3e895c441eaff999917ecf4802 2024-12-15T04:40:19,044 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/43b450bc7b3d4406a7c82a68a2fd0d3f is 50, key is test_row_0/B:col10/1734237616634/Put/seqid=0 2024-12-15T04:40:19,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742287_1463 (size=12001) 2024-12-15T04:40:19,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-15T04:40:19,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237679090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237679395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,448 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/43b450bc7b3d4406a7c82a68a2fd0d3f 2024-12-15T04:40:19,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f30b772d908d42308ded31b4a25a5445 is 50, key is test_row_0/C:col10/1734237616634/Put/seqid=0 2024-12-15T04:40:19,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742288_1464 (size=12001) 2024-12-15T04:40:19,463 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f30b772d908d42308ded31b4a25a5445 2024-12-15T04:40:19,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d1cd6b3e895c441eaff999917ecf4802 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1cd6b3e895c441eaff999917ecf4802 2024-12-15T04:40:19,473 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1cd6b3e895c441eaff999917ecf4802, entries=150, sequenceid=116, filesize=11.7 K 2024-12-15T04:40:19,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/43b450bc7b3d4406a7c82a68a2fd0d3f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/43b450bc7b3d4406a7c82a68a2fd0d3f 2024-12-15T04:40:19,478 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/43b450bc7b3d4406a7c82a68a2fd0d3f, entries=150, sequenceid=116, filesize=11.7 K 2024-12-15T04:40:19,479 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f30b772d908d42308ded31b4a25a5445 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f30b772d908d42308ded31b4a25a5445 2024-12-15T04:40:19,482 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f30b772d908d42308ded31b4a25a5445, entries=150, sequenceid=116, filesize=11.7 K 2024-12-15T04:40:19,483 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 7c960dc144bb2e403b8a500d7a170ddd in 852ms, sequenceid=116, compaction requested=false 2024-12-15T04:40:19,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:19,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:19,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-12-15T04:40:19,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-12-15T04:40:19,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-12-15T04:40:19,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0050 sec 2024-12-15T04:40:19,486 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 1.0100 sec 2024-12-15T04:40:19,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-15T04:40:19,578 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-12-15T04:40:19,579 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:19,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-12-15T04:40:19,580 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:19,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-15T04:40:19,581 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:19,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-15T04:40:19,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:19,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-12-15T04:40:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:19,692 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:19,695 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/e3c9a6702f5b43baaba81f9ab13672a5 is 50, key is test_row_0/A:col10/1734237618779/Put/seqid=0 2024-12-15T04:40:19,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742289_1465 (size=14391) 2024-12-15T04:40:19,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237679716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,720 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237679717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237679717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237679717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,732 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,732 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:19,732 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:19,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:19,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:19,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237679821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237679821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237679821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237679821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-15T04:40:19,884 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:19,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:19,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:19,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:19,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:19,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:19,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:19,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:19,901 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:19,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237679899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237680024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237680025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,027 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237680026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237680026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,036 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:20,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:20,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:20,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,105 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/e3c9a6702f5b43baaba81f9ab13672a5 2024-12-15T04:40:20,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/1da02946a6404d5cb53ccb29b5a3d127 is 50, key is test_row_0/B:col10/1734237618779/Put/seqid=0 2024-12-15T04:40:20,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742290_1466 (size=12051) 2024-12-15T04:40:20,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-15T04:40:20,188 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:20,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:20,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,330 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237680328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237680328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237680328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237680329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,340 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,340 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,341 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,341 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,492 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,493 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:20,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:20,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,493 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:20,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/1da02946a6404d5cb53ccb29b5a3d127 2024-12-15T04:40:20,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f97b27c787d240fda1185d8a0a93e5dd is 50, key is test_row_0/C:col10/1734237618779/Put/seqid=0 2024-12-15T04:40:20,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742291_1467 (size=12051) 2024-12-15T04:40:20,644 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,645 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:20,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:20,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,645 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:20,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-15T04:40:20,797 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:20,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:20,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,797 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:20,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237680833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237680834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237680835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237680835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:20,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237680906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,922 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f97b27c787d240fda1185d8a0a93e5dd 2024-12-15T04:40:20,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/e3c9a6702f5b43baaba81f9ab13672a5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e3c9a6702f5b43baaba81f9ab13672a5 2024-12-15T04:40:20,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e3c9a6702f5b43baaba81f9ab13672a5, entries=200, sequenceid=129, filesize=14.1 K 2024-12-15T04:40:20,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/1da02946a6404d5cb53ccb29b5a3d127 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/1da02946a6404d5cb53ccb29b5a3d127 2024-12-15T04:40:20,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/1da02946a6404d5cb53ccb29b5a3d127, entries=150, sequenceid=129, filesize=11.8 K 2024-12-15T04:40:20,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f97b27c787d240fda1185d8a0a93e5dd as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f97b27c787d240fda1185d8a0a93e5dd 2024-12-15T04:40:20,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f97b27c787d240fda1185d8a0a93e5dd, entries=150, sequenceid=129, filesize=11.8 K 2024-12-15T04:40:20,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7c960dc144bb2e403b8a500d7a170ddd in 1242ms, sequenceid=129, compaction requested=true 2024-12-15T04:40:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:20,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:20,934 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:20,934 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:20,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:20,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:20,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:20,935 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:20,935 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38599 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:20,935 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/A is initiating minor compaction (all files) 2024-12-15T04:40:20,935 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/A in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:20,935 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/9a6570a211a046beae853e9f048102a3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1cd6b3e895c441eaff999917ecf4802, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e3c9a6702f5b43baaba81f9ab13672a5] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=37.7 K 2024-12-15T04:40:20,935 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:20,936 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/B is initiating minor compaction (all files) 2024-12-15T04:40:20,936 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/B in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:20,936 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/96702e6038ea44bb9bec2f3b41919bf2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/43b450bc7b3d4406a7c82a68a2fd0d3f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/1da02946a6404d5cb53ccb29b5a3d127] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.4 K 2024-12-15T04:40:20,936 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9a6570a211a046beae853e9f048102a3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734237615556 2024-12-15T04:40:20,936 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 96702e6038ea44bb9bec2f3b41919bf2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734237615556 2024-12-15T04:40:20,936 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1cd6b3e895c441eaff999917ecf4802, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734237616624 2024-12-15T04:40:20,936 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 43b450bc7b3d4406a7c82a68a2fd0d3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734237616624 2024-12-15T04:40:20,936 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting e3c9a6702f5b43baaba81f9ab13672a5, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237618769 2024-12-15T04:40:20,936 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1da02946a6404d5cb53ccb29b5a3d127, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237618769 2024-12-15T04:40:20,941 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#B#compaction#387 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:20,942 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/ed4c7ec331cf48a9905f7ffac00c7bcf is 50, key is test_row_0/B:col10/1734237618779/Put/seqid=0 2024-12-15T04:40:20,942 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#A#compaction#388 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:20,942 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/21d458966fd74c87bb19684d9881102f is 50, key is test_row_0/A:col10/1734237618779/Put/seqid=0 2024-12-15T04:40:20,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742292_1468 (size=12359) 2024-12-15T04:40:20,949 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:20,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-12-15T04:40:20,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:20,949 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:40:20,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:20,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:20,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742293_1469 (size=12359) 2024-12-15T04:40:20,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/090ccd24cde249bb8b03dd58445076a7 is 50, key is test_row_0/A:col10/1734237619707/Put/seqid=0 2024-12-15T04:40:20,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742294_1470 (size=12151) 2024-12-15T04:40:21,352 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/ed4c7ec331cf48a9905f7ffac00c7bcf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/ed4c7ec331cf48a9905f7ffac00c7bcf 2024-12-15T04:40:21,355 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/B of 7c960dc144bb2e403b8a500d7a170ddd into ed4c7ec331cf48a9905f7ffac00c7bcf(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:21,355 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:21,355 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/B, priority=13, startTime=1734237620934; duration=0sec 2024-12-15T04:40:21,355 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:21,355 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:B 2024-12-15T04:40:21,355 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:21,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:21,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/C is initiating minor compaction (all files) 2024-12-15T04:40:21,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/C in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:21,356 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5e35eae8fea3424687ed34bea2afe83d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f30b772d908d42308ded31b4a25a5445, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f97b27c787d240fda1185d8a0a93e5dd] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.4 K 2024-12-15T04:40:21,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e35eae8fea3424687ed34bea2afe83d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1734237615556 2024-12-15T04:40:21,356 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting f30b772d908d42308ded31b4a25a5445, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1734237616624 2024-12-15T04:40:21,357 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting f97b27c787d240fda1185d8a0a93e5dd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237618769 2024-12-15T04:40:21,361 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/21d458966fd74c87bb19684d9881102f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/21d458966fd74c87bb19684d9881102f 2024-12-15T04:40:21,363 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#C#compaction#390 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:21,363 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/7b17604755ce41fda622ecd1f6d0c739 is 50, key is test_row_0/C:col10/1734237618779/Put/seqid=0 2024-12-15T04:40:21,364 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/A of 7c960dc144bb2e403b8a500d7a170ddd into 21d458966fd74c87bb19684d9881102f(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:21,365 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:21,365 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/A, priority=13, startTime=1734237620934; duration=0sec 2024-12-15T04:40:21,365 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:21,365 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:A 2024-12-15T04:40:21,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742295_1471 (size=12359) 2024-12-15T04:40:21,369 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/090ccd24cde249bb8b03dd58445076a7 2024-12-15T04:40:21,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/70a762830f2b4553b74894a4213fc872 is 50, key is test_row_0/B:col10/1734237619707/Put/seqid=0 2024-12-15T04:40:21,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742296_1472 (size=12151) 
2024-12-15T04:40:21,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-15T04:40:21,770 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/7b17604755ce41fda622ecd1f6d0c739 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7b17604755ce41fda622ecd1f6d0c739 2024-12-15T04:40:21,774 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/C of 7c960dc144bb2e403b8a500d7a170ddd into 7b17604755ce41fda622ecd1f6d0c739(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:21,774 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:21,774 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/C, priority=13, startTime=1734237620935; duration=0sec 2024-12-15T04:40:21,774 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:21,774 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:C 2024-12-15T04:40:21,777 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/70a762830f2b4553b74894a4213fc872 2024-12-15T04:40:21,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/388d364c5a2842d6af89c640b73080e7 is 50, key is test_row_0/C:col10/1734237619707/Put/seqid=0 2024-12-15T04:40:21,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742297_1473 (size=12151) 2024-12-15T04:40:21,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:21,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237681850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:21,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237681853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:21,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237681853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:21,859 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237681854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:21,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237681955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:21,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237681957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:21,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237681958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:21,961 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:21,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237681960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237682158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237682160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237682162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237682162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,187 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=152 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/388d364c5a2842d6af89c640b73080e7 2024-12-15T04:40:22,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/090ccd24cde249bb8b03dd58445076a7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/090ccd24cde249bb8b03dd58445076a7 2024-12-15T04:40:22,193 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/090ccd24cde249bb8b03dd58445076a7, entries=150, sequenceid=152, filesize=11.9 K 2024-12-15T04:40:22,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/70a762830f2b4553b74894a4213fc872 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/70a762830f2b4553b74894a4213fc872 2024-12-15T04:40:22,195 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/70a762830f2b4553b74894a4213fc872, entries=150, sequenceid=152, filesize=11.9 K 2024-12-15T04:40:22,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/388d364c5a2842d6af89c640b73080e7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/388d364c5a2842d6af89c640b73080e7 2024-12-15T04:40:22,198 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/388d364c5a2842d6af89c640b73080e7, entries=150, sequenceid=152, filesize=11.9 K 2024-12-15T04:40:22,199 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 7c960dc144bb2e403b8a500d7a170ddd in 1250ms, sequenceid=152, compaction requested=false 2024-12-15T04:40:22,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:22,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:22,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-12-15T04:40:22,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-12-15T04:40:22,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-12-15T04:40:22,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6190 sec 2024-12-15T04:40:22,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.6220 sec 2024-12-15T04:40:22,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:22,464 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:40:22,464 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:22,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:22,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:22,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:22,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:22,465 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:22,467 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/c32cb4fcdc6441ce957c5e61cb179e83 is 50, key is test_row_0/A:col10/1734237621852/Put/seqid=0 2024-12-15T04:40:22,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742298_1474 (size=14541) 2024-12-15T04:40:22,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237682486, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237682493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237682494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,498 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237682494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237682595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237682598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,602 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237682598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237682599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237682799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237682803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237682804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237682804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/c32cb4fcdc6441ce957c5e61cb179e83 2024-12-15T04:40:22,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/c77c4ea8ac404322a3db3e0b89b368f6 is 50, key is test_row_0/B:col10/1734237621852/Put/seqid=0 2024-12-15T04:40:22,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742299_1475 (size=12151) 2024-12-15T04:40:22,924 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:22,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237682921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:22,925 DEBUG [Thread-1971 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:40:23,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237683103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237683108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237683108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237683109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,282 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/c77c4ea8ac404322a3db3e0b89b368f6 2024-12-15T04:40:23,287 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/4e683221ad9245438be08be39a5a7988 is 50, key is test_row_0/C:col10/1734237621852/Put/seqid=0 2024-12-15T04:40:23,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742300_1476 (size=12151) 2024-12-15T04:40:23,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237683606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237683612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237683614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:23,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237683614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-12-15T04:40:23,689 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-12-15T04:40:23,690 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/4e683221ad9245438be08be39a5a7988 2024-12-15T04:40:23,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-12-15T04:40:23,692 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-15T04:40:23,693 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:23,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:23,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/c32cb4fcdc6441ce957c5e61cb179e83 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/c32cb4fcdc6441ce957c5e61cb179e83 2024-12-15T04:40:23,699 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/c32cb4fcdc6441ce957c5e61cb179e83, entries=200, sequenceid=170, filesize=14.2 K 2024-12-15T04:40:23,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/c77c4ea8ac404322a3db3e0b89b368f6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c77c4ea8ac404322a3db3e0b89b368f6 2024-12-15T04:40:23,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c77c4ea8ac404322a3db3e0b89b368f6, entries=150, sequenceid=170, filesize=11.9 K 2024-12-15T04:40:23,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/4e683221ad9245438be08be39a5a7988 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/4e683221ad9245438be08be39a5a7988 2024-12-15T04:40:23,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/4e683221ad9245438be08be39a5a7988, entries=150, sequenceid=170, filesize=11.9 K 2024-12-15T04:40:23,707 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 7c960dc144bb2e403b8a500d7a170ddd in 1243ms, sequenceid=170, compaction requested=true 2024-12-15T04:40:23,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:23,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:23,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:23,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:23,708 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:23,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:23,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:C, priority=-2147483648, current under compaction 
store size is 3 2024-12-15T04:40:23,708 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:23,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:23,709 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:23,709 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:23,709 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/A is initiating minor compaction (all files) 2024-12-15T04:40:23,709 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/B is initiating minor compaction (all files) 2024-12-15T04:40:23,709 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/B in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:23,709 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/A in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
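
[Editor's note] The repeated RegionTooBusyException entries above come from HRegion.checkResources() rejecting puts once the region's memstore passes its blocking limit (512.0 K here, which suggests a deliberately small flush size in this test's configuration). As a hedged sketch only, these are the configuration knobs normally involved: the server-side flush size and block multiplier that define the limit, and the client-side retry settings that govern how a blocked put backs off. The values shown are illustrative assumptions, not the values used by this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingConfigSketch {
        public static Configuration sketch() {
            Configuration conf = HBaseConfiguration.create();
            // Server side: writes to a region block once its memstore reaches roughly
            // flush.size * block.multiplier (the "Over memstore limit" seen above).
            conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024); // illustrative
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);             // illustrative
            // Client side: how many times and how quickly a rejected mutation is retried.
            conf.setInt("hbase.client.retries.number", 15);  // illustrative
            conf.setLong("hbase.client.pause", 100L);        // ms between retries, illustrative
            return conf;
        }
    }
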
2024-12-15T04:40:23,709 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/21d458966fd74c87bb19684d9881102f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/090ccd24cde249bb8b03dd58445076a7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/c32cb4fcdc6441ce957c5e61cb179e83] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=38.1 K 2024-12-15T04:40:23,709 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/ed4c7ec331cf48a9905f7ffac00c7bcf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/70a762830f2b4553b74894a4213fc872, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c77c4ea8ac404322a3db3e0b89b368f6] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.8 K 2024-12-15T04:40:23,710 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ed4c7ec331cf48a9905f7ffac00c7bcf, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237618769 2024-12-15T04:40:23,710 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21d458966fd74c87bb19684d9881102f, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237618769 2024-12-15T04:40:23,710 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 70a762830f2b4553b74894a4213fc872, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1734237619707 2024-12-15T04:40:23,710 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 090ccd24cde249bb8b03dd58445076a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1734237619707 2024-12-15T04:40:23,710 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting c32cb4fcdc6441ce957c5e61cb179e83, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734237621849 2024-12-15T04:40:23,710 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c77c4ea8ac404322a3db3e0b89b368f6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734237621852 2024-12-15T04:40:23,716 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#B#compaction#396 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:23,717 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/bf726a9e440841328fbe51f78e6f0a0f is 50, key is test_row_0/B:col10/1734237621852/Put/seqid=0 2024-12-15T04:40:23,717 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#A#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:23,718 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/02962749ca844cb59394440372ffc202 is 50, key is test_row_0/A:col10/1734237621852/Put/seqid=0 2024-12-15T04:40:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742301_1477 (size=12561) 2024-12-15T04:40:23,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742302_1478 (size=12561) 2024-12-15T04:40:23,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-15T04:40:23,844 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:23,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-12-15T04:40:23,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
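
[Editor's note] The flush chain above (HMaster flush request, FlushTableProcedure pid=132, FlushRegionProcedure pid=133, then FlushRegionCallable on the region server) is what the test driver triggers between write batches. A minimal sketch of the client call that starts that chain is below; the connection setup is a placeholder and only Admin.flush on the table seen in the log is assumed.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to run a flush procedure like pid=132/134 above;
                // the call returns once the procedure reports completion.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }
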
2024-12-15T04:40:23,845 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-15T04:40:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:23,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:23,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/428b89d07f2a40a2b8a725ed086b0f17 is 50, key is test_row_0/A:col10/1734237622485/Put/seqid=0 2024-12-15T04:40:23,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742303_1479 (size=12151) 2024-12-15T04:40:23,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-15T04:40:24,133 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/02962749ca844cb59394440372ffc202 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/02962749ca844cb59394440372ffc202 2024-12-15T04:40:24,136 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/bf726a9e440841328fbe51f78e6f0a0f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/bf726a9e440841328fbe51f78e6f0a0f 2024-12-15T04:40:24,137 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/A of 7c960dc144bb2e403b8a500d7a170ddd into 
02962749ca844cb59394440372ffc202(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:24,137 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:24,137 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/A, priority=13, startTime=1734237623708; duration=0sec 2024-12-15T04:40:24,137 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:24,137 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:A 2024-12-15T04:40:24,137 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:24,138 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:24,138 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/C is initiating minor compaction (all files) 2024-12-15T04:40:24,138 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/C in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
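
[Editor's note] Each flush adds one HFile per store, and the ExploringCompactionPolicy lines above show the server then picking three eligible files per family for a minor compaction. For reference, a hedged sketch of how a client can request compaction of a single column family explicitly, together with the minimum-file threshold that drives the automatic selection; the threshold value is an assumption for illustration, not read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CompactionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Minor compaction becomes eligible once a store holds at least this many files
            // (three files were selected in the log above); the value here is illustrative.
            conf.setInt("hbase.hstore.compaction.min", 3);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Request compaction of just column family A, the same work the
                // shortCompactions thread performs automatically after the flush above.
                admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
            }
        }
    }
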
2024-12-15T04:40:24,138 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7b17604755ce41fda622ecd1f6d0c739, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/388d364c5a2842d6af89c640b73080e7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/4e683221ad9245438be08be39a5a7988] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=35.8 K 2024-12-15T04:40:24,139 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b17604755ce41fda622ecd1f6d0c739, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=129, earliestPutTs=1734237618769 2024-12-15T04:40:24,139 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 388d364c5a2842d6af89c640b73080e7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=152, earliestPutTs=1734237619707 2024-12-15T04:40:24,140 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/B of 7c960dc144bb2e403b8a500d7a170ddd into bf726a9e440841328fbe51f78e6f0a0f(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:24,140 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:24,140 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/B, priority=13, startTime=1734237623708; duration=0sec 2024-12-15T04:40:24,140 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:24,140 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:B 2024-12-15T04:40:24,142 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e683221ad9245438be08be39a5a7988, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734237621852 2024-12-15T04:40:24,147 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#C#compaction#399 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:24,147 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/46f0f31049e34d2cb8360a4a969131ac is 50, key is test_row_0/C:col10/1734237621852/Put/seqid=0 2024-12-15T04:40:24,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742304_1480 (size=12561) 2024-12-15T04:40:24,160 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/46f0f31049e34d2cb8360a4a969131ac as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/46f0f31049e34d2cb8360a4a969131ac 2024-12-15T04:40:24,163 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/C of 7c960dc144bb2e403b8a500d7a170ddd into 46f0f31049e34d2cb8360a4a969131ac(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:24,163 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:24,163 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/C, priority=13, startTime=1734237623708; duration=0sec 2024-12-15T04:40:24,163 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:24,163 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:C 2024-12-15T04:40:24,253 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/428b89d07f2a40a2b8a725ed086b0f17 2024-12-15T04:40:24,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d92cb602e1fa4565a0e814306e157f35 is 50, key is test_row_0/B:col10/1734237622485/Put/seqid=0 2024-12-15T04:40:24,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742305_1481 (size=12151) 2024-12-15T04:40:24,262 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d92cb602e1fa4565a0e814306e157f35 2024-12-15T04:40:24,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/0b029b088d294e799578de2a99164c05 is 50, key is test_row_0/C:col10/1734237622485/Put/seqid=0 2024-12-15T04:40:24,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742306_1482 (size=12151) 2024-12-15T04:40:24,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-15T04:40:24,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:24,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237684627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237684628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237684629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237684631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,670 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/0b029b088d294e799578de2a99164c05 2024-12-15T04:40:24,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/428b89d07f2a40a2b8a725ed086b0f17 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/428b89d07f2a40a2b8a725ed086b0f17 2024-12-15T04:40:24,676 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/428b89d07f2a40a2b8a725ed086b0f17, entries=150, sequenceid=192, filesize=11.9 K 2024-12-15T04:40:24,677 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d92cb602e1fa4565a0e814306e157f35 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d92cb602e1fa4565a0e814306e157f35 2024-12-15T04:40:24,679 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d92cb602e1fa4565a0e814306e157f35, entries=150, sequenceid=192, filesize=11.9 K 2024-12-15T04:40:24,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/0b029b088d294e799578de2a99164c05 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/0b029b088d294e799578de2a99164c05 2024-12-15T04:40:24,683 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/0b029b088d294e799578de2a99164c05, entries=150, sequenceid=192, filesize=11.9 K 2024-12-15T04:40:24,683 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7c960dc144bb2e403b8a500d7a170ddd in 839ms, sequenceid=192, compaction requested=false 2024-12-15T04:40:24,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:24,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:24,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-12-15T04:40:24,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-12-15T04:40:24,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-15T04:40:24,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 991 msec 2024-12-15T04:40:24,686 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 995 msec 2024-12-15T04:40:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:24,735 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-12-15T04:40:24,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:24,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:24,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:24,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:24,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:24,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:24,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d6aa7376bb3f4f158a53e10597c72a00 is 50, key is test_row_0/A:col10/1734237624626/Put/seqid=0 2024-12-15T04:40:24,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742307_1483 (size=12151) 2024-12-15T04:40:24,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237684754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237684754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237684755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,764 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237684759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-12-15T04:40:24,796 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-12-15T04:40:24,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:24,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-12-15T04:40:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-15T04:40:24,798 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:24,799 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:24,799 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:24,861 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237684860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237684860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237684861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:24,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237684865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-15T04:40:24,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:24,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:24,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:24,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:24,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:24,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
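
[Editor's note] The "Unable to complete flush ... as already flushing" failure above happens because a second flush procedure reached the region while the previous flush was still writing; the master records the remote procedure failure and the writers meanwhile keep the memstore above its limit. Those writers are ordinary multi-family puts against rows like test_row_0, visible in the "key is test_row_0/A:col10/..." lines. A rough sketch of that kind of write follows, with placeholder values; it is not the actual TestAcidGuarantees writer code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AcidWriterSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // One atomic row mutation touching all three families, mirroring the
                // A/B/C store flushes and compactions recorded in the log above.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                byte[] value = Bytes.toBytes("value"); // placeholder payload
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put); // may fail with RegionTooBusyException and be retried by the client
            }
        }
    }
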
2024-12-15T04:40:24,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:24,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,068 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237685062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237685062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237685064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237685070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-15T04:40:25,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:25,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:25,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d6aa7376bb3f4f158a53e10597c72a00 2024-12-15T04:40:25,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6063884dd3f2498c8ad271f0e007c997 is 50, key is test_row_0/B:col10/1734237624626/Put/seqid=0 2024-12-15T04:40:25,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742308_1484 (size=12151) 2024-12-15T04:40:25,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:25,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
as already flushing 2024-12-15T04:40:25,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,371 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237685369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237685370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,372 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237685370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237685378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-15T04:40:25,406 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:25,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:25,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,407 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:25,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,552 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6063884dd3f2498c8ad271f0e007c997 2024-12-15T04:40:25,557 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/33679dd7408145ac87766650ae51fc8a is 50, key is test_row_0/C:col10/1734237624626/Put/seqid=0 2024-12-15T04:40:25,559 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:25,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:25,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,559 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:25,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742309_1485 (size=12151) 2024-12-15T04:40:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,711 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:25,863 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:25,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:25,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,863 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:25,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237685872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237685873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237685874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237685882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:25,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-15T04:40:25,960 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/33679dd7408145ac87766650ae51fc8a 2024-12-15T04:40:25,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d6aa7376bb3f4f158a53e10597c72a00 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d6aa7376bb3f4f158a53e10597c72a00 2024-12-15T04:40:25,965 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d6aa7376bb3f4f158a53e10597c72a00, entries=150, sequenceid=211, filesize=11.9 K 2024-12-15T04:40:25,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6063884dd3f2498c8ad271f0e007c997 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6063884dd3f2498c8ad271f0e007c997 2024-12-15T04:40:25,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6063884dd3f2498c8ad271f0e007c997, entries=150, sequenceid=211, filesize=11.9 K 2024-12-15T04:40:25,969 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/33679dd7408145ac87766650ae51fc8a as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/33679dd7408145ac87766650ae51fc8a 2024-12-15T04:40:25,972 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/33679dd7408145ac87766650ae51fc8a, entries=150, sequenceid=211, filesize=11.9 K 2024-12-15T04:40:25,973 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 7c960dc144bb2e403b8a500d7a170ddd in 1237ms, sequenceid=211, compaction requested=true 2024-12-15T04:40:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:25,973 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:25,973 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:25,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:25,973 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:25,973 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:25,974 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/A is initiating minor compaction (all files) 2024-12-15T04:40:25,974 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/B is initiating minor compaction (all files) 2024-12-15T04:40:25,974 INFO 
[RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/A in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,974 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/B in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,974 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/02962749ca844cb59394440372ffc202, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/428b89d07f2a40a2b8a725ed086b0f17, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d6aa7376bb3f4f158a53e10597c72a00] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=36.0 K 2024-12-15T04:40:25,974 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/bf726a9e440841328fbe51f78e6f0a0f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d92cb602e1fa4565a0e814306e157f35, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6063884dd3f2498c8ad271f0e007c997] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=36.0 K 2024-12-15T04:40:25,974 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02962749ca844cb59394440372ffc202, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734237621852 2024-12-15T04:40:25,974 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting bf726a9e440841328fbe51f78e6f0a0f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734237621852 2024-12-15T04:40:25,974 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 428b89d07f2a40a2b8a725ed086b0f17, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1734237622484 2024-12-15T04:40:25,974 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d92cb602e1fa4565a0e814306e157f35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1734237622484 2024-12-15T04:40:25,974 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6aa7376bb3f4f158a53e10597c72a00, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237624626 2024-12-15T04:40:25,974 DEBUG 
[RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6063884dd3f2498c8ad271f0e007c997, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237624626 2024-12-15T04:40:25,979 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#B#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:25,979 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#A#compaction#406 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:25,980 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/90d39c5097af4214829ca27aed1ba2d7 is 50, key is test_row_0/B:col10/1734237624626/Put/seqid=0 2024-12-15T04:40:25,980 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/f4b4f3ee48114ac0ab7e95d2c06aecaa is 50, key is test_row_0/A:col10/1734237624626/Put/seqid=0 2024-12-15T04:40:25,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742311_1487 (size=12663) 2024-12-15T04:40:25,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742310_1486 (size=12663) 2024-12-15T04:40:25,986 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/90d39c5097af4214829ca27aed1ba2d7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/90d39c5097af4214829ca27aed1ba2d7 2024-12-15T04:40:25,989 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/B of 7c960dc144bb2e403b8a500d7a170ddd into 90d39c5097af4214829ca27aed1ba2d7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:25,989 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:25,990 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/B, priority=13, startTime=1734237625973; duration=0sec 2024-12-15T04:40:25,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:25,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:B 2024-12-15T04:40:25,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:25,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:25,990 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/C is initiating minor compaction (all files) 2024-12-15T04:40:25,990 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/C in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:25,991 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/46f0f31049e34d2cb8360a4a969131ac, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/0b029b088d294e799578de2a99164c05, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/33679dd7408145ac87766650ae51fc8a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=36.0 K 2024-12-15T04:40:25,991 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 46f0f31049e34d2cb8360a4a969131ac, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1734237621852 2024-12-15T04:40:25,991 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b029b088d294e799578de2a99164c05, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1734237622484 2024-12-15T04:40:25,991 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 33679dd7408145ac87766650ae51fc8a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237624626 2024-12-15T04:40:25,995 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7c960dc144bb2e403b8a500d7a170ddd#C#compaction#407 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:25,995 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/9f92efa973384d2f852f4b1100efb32a is 50, key is test_row_0/C:col10/1734237624626/Put/seqid=0 2024-12-15T04:40:25,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742312_1488 (size=12663) 2024-12-15T04:40:26,002 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/9f92efa973384d2f852f4b1100efb32a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/9f92efa973384d2f852f4b1100efb32a 2024-12-15T04:40:26,005 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/C of 7c960dc144bb2e403b8a500d7a170ddd into 9f92efa973384d2f852f4b1100efb32a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:26,005 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:26,005 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/C, priority=13, startTime=1734237625973; duration=0sec 2024-12-15T04:40:26,005 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:26,005 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:C 2024-12-15T04:40:26,015 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:26,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-12-15T04:40:26,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:26,015 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-12-15T04:40:26,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:26,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:26,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:26,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:26,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:26,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:26,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/1c28411a9d3d4b568ff9224457d59e55 is 50, key is test_row_0/A:col10/1734237624748/Put/seqid=0 2024-12-15T04:40:26,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742313_1489 (size=12151) 2024-12-15T04:40:26,386 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/f4b4f3ee48114ac0ab7e95d2c06aecaa as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/f4b4f3ee48114ac0ab7e95d2c06aecaa 2024-12-15T04:40:26,390 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/A of 7c960dc144bb2e403b8a500d7a170ddd into f4b4f3ee48114ac0ab7e95d2c06aecaa(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:26,390 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:26,390 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/A, priority=13, startTime=1734237625973; duration=0sec 2024-12-15T04:40:26,390 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:26,390 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:A 2024-12-15T04:40:26,423 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/1c28411a9d3d4b568ff9224457d59e55 2024-12-15T04:40:26,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6ca9abacf7084d55b0ec9e91378683b2 is 50, key is test_row_0/B:col10/1734237624748/Put/seqid=0 2024-12-15T04:40:26,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742314_1490 (size=12151) 2024-12-15T04:40:26,832 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6ca9abacf7084d55b0ec9e91378683b2 2024-12-15T04:40:26,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f5d89eec37094dfaae42796dab643f12 is 50, key is test_row_0/C:col10/1734237624748/Put/seqid=0 2024-12-15T04:40:26,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742315_1491 (size=12151) 2024-12-15T04:40:26,883 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:26,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:26,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:26,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237686894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:26,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:26,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237686895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:26,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-15T04:40:26,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:26,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237686898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:26,902 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:26,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237686899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:26,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:26,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41330 deadline: 1734237686951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:26,954 DEBUG [Thread-1971 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., hostname=e56de37b85b3,43199,1734237482035, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:40:27,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237687000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237687000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237687002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237687003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,210 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237687206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237687206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,211 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237687207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237687207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,242 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=233 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f5d89eec37094dfaae42796dab643f12 2024-12-15T04:40:27,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/1c28411a9d3d4b568ff9224457d59e55 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/1c28411a9d3d4b568ff9224457d59e55 2024-12-15T04:40:27,248 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/1c28411a9d3d4b568ff9224457d59e55, entries=150, sequenceid=233, filesize=11.9 K 2024-12-15T04:40:27,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/6ca9abacf7084d55b0ec9e91378683b2 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ca9abacf7084d55b0ec9e91378683b2 2024-12-15T04:40:27,251 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ca9abacf7084d55b0ec9e91378683b2, entries=150, sequenceid=233, filesize=11.9 K 2024-12-15T04:40:27,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/f5d89eec37094dfaae42796dab643f12 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f5d89eec37094dfaae42796dab643f12 2024-12-15T04:40:27,255 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f5d89eec37094dfaae42796dab643f12, entries=150, sequenceid=233, filesize=11.9 K 2024-12-15T04:40:27,256 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 7c960dc144bb2e403b8a500d7a170ddd in 1241ms, sequenceid=233, compaction requested=false 2024-12-15T04:40:27,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:27,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:27,256 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-12-15T04:40:27,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-12-15T04:40:27,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-12-15T04:40:27,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4580 sec 2024-12-15T04:40:27,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 2.4610 sec 2024-12-15T04:40:27,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:27,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-12-15T04:40:27,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:27,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:27,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:27,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:27,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:27,516 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:27,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/5cb5c44685b0473ca9c59054bc6297f1 is 50, key is test_row_0/A:col10/1734237626898/Put/seqid=0 2024-12-15T04:40:27,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742316_1492 (size=14541) 2024-12-15T04:40:27,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237687532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237687534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237687534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237687535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237687637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237687639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237687639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237687639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237687840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237687843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237687843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:27,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237687843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:27,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/5cb5c44685b0473ca9c59054bc6297f1 2024-12-15T04:40:27,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/357fae7d6c3744f28e740e236f32f3e8 is 50, key is test_row_0/B:col10/1734237626898/Put/seqid=0 2024-12-15T04:40:27,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742317_1493 (size=12151) 2024-12-15T04:40:27,954 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/357fae7d6c3744f28e740e236f32f3e8 2024-12-15T04:40:27,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/c0895091b0b147f6995fc3c7c78d0ef1 is 50, key is test_row_0/C:col10/1734237626898/Put/seqid=0 2024-12-15T04:40:27,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742318_1494 (size=12151) 2024-12-15T04:40:28,147 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237688145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237688145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237688147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237688147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/c0895091b0b147f6995fc3c7c78d0ef1 2024-12-15T04:40:28,365 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/5cb5c44685b0473ca9c59054bc6297f1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5cb5c44685b0473ca9c59054bc6297f1 2024-12-15T04:40:28,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5cb5c44685b0473ca9c59054bc6297f1, entries=200, sequenceid=253, filesize=14.2 K 2024-12-15T04:40:28,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/357fae7d6c3744f28e740e236f32f3e8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/357fae7d6c3744f28e740e236f32f3e8 2024-12-15T04:40:28,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/357fae7d6c3744f28e740e236f32f3e8, entries=150, sequenceid=253, filesize=11.9 K 2024-12-15T04:40:28,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/c0895091b0b147f6995fc3c7c78d0ef1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c0895091b0b147f6995fc3c7c78d0ef1 2024-12-15T04:40:28,378 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c0895091b0b147f6995fc3c7c78d0ef1, entries=150, sequenceid=253, filesize=11.9 K 2024-12-15T04:40:28,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 7c960dc144bb2e403b8a500d7a170ddd in 863ms, sequenceid=253, compaction requested=true 2024-12-15T04:40:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:28,379 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:28,379 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:28,379 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/B is initiating minor compaction (all files) 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/A is initiating minor compaction (all files) 2024-12-15T04:40:28,380 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/B in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:28,380 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/A in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:28,380 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/90d39c5097af4214829ca27aed1ba2d7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ca9abacf7084d55b0ec9e91378683b2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/357fae7d6c3744f28e740e236f32f3e8] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=36.1 K 2024-12-15T04:40:28,380 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/f4b4f3ee48114ac0ab7e95d2c06aecaa, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/1c28411a9d3d4b568ff9224457d59e55, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5cb5c44685b0473ca9c59054bc6297f1] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=38.4 K 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4b4f3ee48114ac0ab7e95d2c06aecaa, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237624626 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 90d39c5097af4214829ca27aed1ba2d7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237624626 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ca9abacf7084d55b0ec9e91378683b2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734237624748 2024-12-15T04:40:28,380 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c28411a9d3d4b568ff9224457d59e55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734237624748 2024-12-15T04:40:28,381 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 357fae7d6c3744f28e740e236f32f3e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734237626898 2024-12-15T04:40:28,381 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5cb5c44685b0473ca9c59054bc6297f1, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734237626898 
2024-12-15T04:40:28,386 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#A#compaction#414 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:28,386 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#B#compaction#415 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:28,386 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/bff21cbbe6b049d19a50914d3aaca90f is 50, key is test_row_0/A:col10/1734237626898/Put/seqid=0 2024-12-15T04:40:28,386 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d36984c5a6ec4f88bc48431fd0fd70c0 is 50, key is test_row_0/B:col10/1734237626898/Put/seqid=0 2024-12-15T04:40:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742319_1495 (size=12765) 2024-12-15T04:40:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742320_1496 (size=12765) 2024-12-15T04:40:28,393 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/bff21cbbe6b049d19a50914d3aaca90f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/bff21cbbe6b049d19a50914d3aaca90f 2024-12-15T04:40:28,393 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d36984c5a6ec4f88bc48431fd0fd70c0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d36984c5a6ec4f88bc48431fd0fd70c0 2024-12-15T04:40:28,397 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/A of 7c960dc144bb2e403b8a500d7a170ddd into bff21cbbe6b049d19a50914d3aaca90f(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:28,397 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/B of 7c960dc144bb2e403b8a500d7a170ddd into d36984c5a6ec4f88bc48431fd0fd70c0(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:28,397 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:28,397 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:28,397 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/A, priority=13, startTime=1734237628379; duration=0sec 2024-12-15T04:40:28,397 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/B, priority=13, startTime=1734237628379; duration=0sec 2024-12-15T04:40:28,398 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:28,398 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:A 2024-12-15T04:40:28,398 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:28,398 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:B 2024-12-15T04:40:28,398 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:28,398 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:28,398 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/C is initiating minor compaction (all files) 2024-12-15T04:40:28,398 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/C in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:28,399 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/9f92efa973384d2f852f4b1100efb32a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f5d89eec37094dfaae42796dab643f12, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c0895091b0b147f6995fc3c7c78d0ef1] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=36.1 K 2024-12-15T04:40:28,399 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f92efa973384d2f852f4b1100efb32a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1734237624626 2024-12-15T04:40:28,399 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting f5d89eec37094dfaae42796dab643f12, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=233, earliestPutTs=1734237624748 2024-12-15T04:40:28,399 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0895091b0b147f6995fc3c7c78d0ef1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734237626898 2024-12-15T04:40:28,403 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#C#compaction#416 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:28,403 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/c9adfcf2fe52488eb8fb6d30c6e86524 is 50, key is test_row_0/C:col10/1734237626898/Put/seqid=0 2024-12-15T04:40:28,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742321_1497 (size=12765) 2024-12-15T04:40:28,409 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/c9adfcf2fe52488eb8fb6d30c6e86524 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c9adfcf2fe52488eb8fb6d30c6e86524 2024-12-15T04:40:28,412 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/C of 7c960dc144bb2e403b8a500d7a170ddd into c9adfcf2fe52488eb8fb6d30c6e86524(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:28,412 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:28,413 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/C, priority=13, startTime=1734237628379; duration=0sec 2024-12-15T04:40:28,413 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:28,413 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:C 2024-12-15T04:40:28,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:28,650 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-15T04:40:28,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:28,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:28,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:28,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:28,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:28,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:28,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/2b2e5d861f484f63b8533b77bbd353fe is 50, key is test_row_0/A:col10/1734237627533/Put/seqid=0 2024-12-15T04:40:28,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742322_1498 (size=14741) 2024-12-15T04:40:28,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237688692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237688692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237688692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237688695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237688796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237688797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237688798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237688799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:28,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-12-15T04:40:28,902 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-12-15T04:40:28,903 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:28,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-12-15T04:40:28,904 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:28,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T04:40:28,904 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:28,904 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:29,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237689001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237689001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,004 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237689002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237689003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T04:40:29,055 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,056 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-15T04:40:29,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:29,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:29,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:29,056 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:29,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:29,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:29,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/2b2e5d861f484f63b8533b77bbd353fe 2024-12-15T04:40:29,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d46143b51c4a43c2bb74ef1aee41f860 is 50, key is test_row_0/B:col10/1734237627533/Put/seqid=0 2024-12-15T04:40:29,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742323_1499 (size=12301) 2024-12-15T04:40:29,075 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d46143b51c4a43c2bb74ef1aee41f860 2024-12-15T04:40:29,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/29157fe81dad4ca98b8dbd03df4a9701 is 50, key is test_row_0/C:col10/1734237627533/Put/seqid=0 2024-12-15T04:40:29,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742324_1500 (size=12301) 2024-12-15T04:40:29,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T04:40:29,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-15T04:40:29,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:29,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:29,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:29,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:29,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:29,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:29,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237689305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,309 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237689306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237689305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237689307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,360 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,361 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-15T04:40:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:29,361 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
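The entries above capture the pattern this stretch of the test exercises: client Mutate calls against region 7c960dc144bb2e403b8a500d7a170ddd are rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while the region is already flushing, and the FlushRegionCallable for pid=137 fails with "Unable to complete flush" until the in-progress flush drains the memstore. Below is a minimal, illustrative Java sketch of how a writer can back off and retry when a region reports it is too busy. It is not part of the recorded test: the retry budget and pause values are assumptions, and in practice the HBase client retries such calls internally, so the exception may surface wrapped in the client's retry exception rather than directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family and qualifier mirror the ones seen in the log (test_row_0, A:col10).
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long pauseMs = 100;                              // assumed starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) { // assumed retry budget
                try {
                    table.put(put);                          // may fail while the memstore is over its blocking limit
                    break;                                   // write accepted
                } catch (RegionTooBusyException e) {
                    // Region memstore is over its blocking limit, as in the log above:
                    // wait for the flush to drain it, then try again with a longer pause.
                    Thread.sleep(pauseMs);
                    pauseMs *= 2;                            // simple exponential backoff
                }
            }
        }
    }
}

The 512.0 K threshold in these messages is the region's blocking memstore size (in a standard setup, the memstore flush size multiplied by hbase.hregion.memstore.block.multiplier), which is why the warnings stop once the flushes at sequenceid=274 and 292 complete further down.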
2024-12-15T04:40:29,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:29,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:29,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/29157fe81dad4ca98b8dbd03df4a9701 2024-12-15T04:40:29,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/2b2e5d861f484f63b8533b77bbd353fe as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/2b2e5d861f484f63b8533b77bbd353fe 2024-12-15T04:40:29,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/2b2e5d861f484f63b8533b77bbd353fe, entries=200, sequenceid=274, filesize=14.4 K 2024-12-15T04:40:29,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/d46143b51c4a43c2bb74ef1aee41f860 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d46143b51c4a43c2bb74ef1aee41f860 2024-12-15T04:40:29,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d46143b51c4a43c2bb74ef1aee41f860, entries=150, sequenceid=274, filesize=12.0 K 2024-12-15T04:40:29,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/29157fe81dad4ca98b8dbd03df4a9701 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/29157fe81dad4ca98b8dbd03df4a9701 2024-12-15T04:40:29,497 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/29157fe81dad4ca98b8dbd03df4a9701, entries=150, sequenceid=274, filesize=12.0 K 2024-12-15T04:40:29,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 7c960dc144bb2e403b8a500d7a170ddd in 847ms, sequenceid=274, compaction requested=false 2024-12-15T04:40:29,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:29,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=136 2024-12-15T04:40:29,512 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-12-15T04:40:29,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:29,513 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-15T04:40:29,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:29,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:29,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:29,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:29,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:29,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:29,516 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/670d3e60ca954600a9ab3d131d04743e is 50, key is test_row_0/A:col10/1734237628692/Put/seqid=0 2024-12-15T04:40:29,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742325_1501 (size=12301) 2024-12-15T04:40:29,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:29,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:29,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237689828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237689828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237689829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,835 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237689829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,920 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/670d3e60ca954600a9ab3d131d04743e 2024-12-15T04:40:29,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/3aebed82efe64e59ba38176ceeaf4595 is 50, key is test_row_0/B:col10/1734237628692/Put/seqid=0 2024-12-15T04:40:29,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742326_1502 (size=12301) 2024-12-15T04:40:29,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237689933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237689934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237689936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:29,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:29,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237689937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T04:40:30,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237690137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,139 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237690138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237690140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237690141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,331 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/3aebed82efe64e59ba38176ceeaf4595 2024-12-15T04:40:30,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/fb5e92462260489eb4418ee74938e2ff is 50, key is test_row_0/C:col10/1734237628692/Put/seqid=0 2024-12-15T04:40:30,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742327_1503 (size=12301) 2024-12-15T04:40:30,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237690442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237690442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,445 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237690442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237690444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,541 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T04:40:30,739 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/fb5e92462260489eb4418ee74938e2ff 2024-12-15T04:40:30,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/670d3e60ca954600a9ab3d131d04743e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/670d3e60ca954600a9ab3d131d04743e 2024-12-15T04:40:30,744 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/670d3e60ca954600a9ab3d131d04743e, entries=150, sequenceid=292, filesize=12.0 K 2024-12-15T04:40:30,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/3aebed82efe64e59ba38176ceeaf4595 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3aebed82efe64e59ba38176ceeaf4595 2024-12-15T04:40:30,747 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3aebed82efe64e59ba38176ceeaf4595, entries=150, sequenceid=292, filesize=12.0 K 2024-12-15T04:40:30,748 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/fb5e92462260489eb4418ee74938e2ff as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/fb5e92462260489eb4418ee74938e2ff 2024-12-15T04:40:30,751 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/fb5e92462260489eb4418ee74938e2ff, entries=150, sequenceid=292, filesize=12.0 K 2024-12-15T04:40:30,751 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 7c960dc144bb2e403b8a500d7a170ddd in 1238ms, sequenceid=292, compaction requested=true 2024-12-15T04:40:30,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:30,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:30,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-12-15T04:40:30,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-12-15T04:40:30,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-12-15T04:40:30,753 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8480 sec 2024-12-15T04:40:30,754 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.8500 sec 2024-12-15T04:40:30,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:30,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-12-15T04:40:30,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:30,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:30,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:30,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-12-15T04:40:30,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:30,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:30,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/430e23bb992a4da289369493d56eee59 is 50, key is test_row_0/A:col10/1734237629821/Put/seqid=0 2024-12-15T04:40:30,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742328_1504 (size=14741) 2024-12-15T04:40:30,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237690965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237690967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:30,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237690972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:30,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237690972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-15T04:40:31,007 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-12-15T04:40:31,008 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:31,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-12-15T04:40:31,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-15T04:40:31,010 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:31,010 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:31,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:31,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237691073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237691073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237691076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237691076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-15T04:40:31,161 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,161 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:31,161 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:31,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,162 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237691276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,278 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237691276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237691279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237691279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-15T04:40:31,313 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:31,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:31,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/430e23bb992a4da289369493d56eee59 2024-12-15T04:40:31,367 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/26e9fd2de15341acb6417e69758fa3c5 is 50, key is test_row_0/B:col10/1734237629821/Put/seqid=0 2024-12-15T04:40:31,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742329_1505 (size=12301) 2024-12-15T04:40:31,465 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,466 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237691579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237691580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237691584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:31,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237691585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-15T04:40:31,618 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,618 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:31,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:31,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,618 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:31,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,770 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:31,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:31,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/26e9fd2de15341acb6417e69758fa3c5 2024-12-15T04:40:31,770 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:31,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/87fbd749b12d4fffb2559b1e6981be07 is 50, key is test_row_0/C:col10/1734237629821/Put/seqid=0 2024-12-15T04:40:31,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742330_1506 (size=12301) 2024-12-15T04:40:31,922 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:31,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:31,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:31,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:31,923 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:31,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:32,074 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:32,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:32,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:32,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. as already flushing 2024-12-15T04:40:32,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:32,075 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:32,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:32,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:32,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:32,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41284 deadline: 1734237692082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:32,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:32,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41292 deadline: 1734237692083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:32,090 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:32,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41294 deadline: 1734237692090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:32,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:32,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:41376 deadline: 1734237692092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:32,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-15T04:40:32,180 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/87fbd749b12d4fffb2559b1e6981be07 2024-12-15T04:40:32,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/430e23bb992a4da289369493d56eee59 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/430e23bb992a4da289369493d56eee59 2024-12-15T04:40:32,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/430e23bb992a4da289369493d56eee59, entries=200, sequenceid=311, filesize=14.4 K 2024-12-15T04:40:32,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/26e9fd2de15341acb6417e69758fa3c5 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/26e9fd2de15341acb6417e69758fa3c5 2024-12-15T04:40:32,188 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/26e9fd2de15341acb6417e69758fa3c5, entries=150, sequenceid=311, filesize=12.0 K 2024-12-15T04:40:32,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/87fbd749b12d4fffb2559b1e6981be07 as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/87fbd749b12d4fffb2559b1e6981be07 2024-12-15T04:40:32,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/87fbd749b12d4fffb2559b1e6981be07, entries=150, sequenceid=311, filesize=12.0 K 2024-12-15T04:40:32,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 7c960dc144bb2e403b8a500d7a170ddd in 1241ms, sequenceid=311, compaction requested=true 2024-12-15T04:40:32,191 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:32,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:32,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:32,192 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:40:32,192 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:40:32,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:32,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:32,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7c960dc144bb2e403b8a500d7a170ddd:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:32,192 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:32,193 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:40:32,193 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54548 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:40:32,193 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/B is initiating minor compaction (all files) 2024-12-15T04:40:32,193 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/A is initiating minor compaction (all files) 2024-12-15T04:40:32,193 INFO 
[RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/B in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:32,193 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/A in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:32,193 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/bff21cbbe6b049d19a50914d3aaca90f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/2b2e5d861f484f63b8533b77bbd353fe, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/670d3e60ca954600a9ab3d131d04743e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/430e23bb992a4da289369493d56eee59] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=53.3 K 2024-12-15T04:40:32,193 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d36984c5a6ec4f88bc48431fd0fd70c0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d46143b51c4a43c2bb74ef1aee41f860, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3aebed82efe64e59ba38176ceeaf4595, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/26e9fd2de15341acb6417e69758fa3c5] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=48.5 K 2024-12-15T04:40:32,193 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting bff21cbbe6b049d19a50914d3aaca90f, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734237626898 2024-12-15T04:40:32,193 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d36984c5a6ec4f88bc48431fd0fd70c0, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734237626898 2024-12-15T04:40:32,193 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d46143b51c4a43c2bb74ef1aee41f860, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734237627533 2024-12-15T04:40:32,194 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b2e5d861f484f63b8533b77bbd353fe, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, 
seqNum=274, earliestPutTs=1734237627524 2024-12-15T04:40:32,194 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 3aebed82efe64e59ba38176ceeaf4595, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734237628691 2024-12-15T04:40:32,194 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 670d3e60ca954600a9ab3d131d04743e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734237628691 2024-12-15T04:40:32,194 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 26e9fd2de15341acb6417e69758fa3c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734237629821 2024-12-15T04:40:32,194 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 430e23bb992a4da289369493d56eee59, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734237629821 2024-12-15T04:40:32,200 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#A#compaction#426 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:32,200 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#B#compaction#427 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:32,201 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d1c56d36a1fc472495c871a2f7253240 is 50, key is test_row_0/A:col10/1734237629821/Put/seqid=0 2024-12-15T04:40:32,201 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/e2ab77c29baf4c7fb0eb4c47de88b3bf is 50, key is test_row_0/B:col10/1734237629821/Put/seqid=0 2024-12-15T04:40:32,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742331_1507 (size=13051) 2024-12-15T04:40:32,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742332_1508 (size=13051) 2024-12-15T04:40:32,226 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:32,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-12-15T04:40:32,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
2024-12-15T04:40:32,227 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-12-15T04:40:32,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:32,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:32,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:32,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:32,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:32,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:32,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/e93a8f18e3764b97a96855185a709cd7 is 50, key is test_row_0/A:col10/1734237630971/Put/seqid=0 2024-12-15T04:40:32,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742333_1509 (size=12301) 2024-12-15T04:40:32,292 DEBUG [Thread-1980 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6dd48863 to 127.0.0.1:55935 2024-12-15T04:40:32,292 DEBUG [Thread-1980 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:32,294 DEBUG [Thread-1982 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51196534 to 127.0.0.1:55935 2024-12-15T04:40:32,294 DEBUG [Thread-1982 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:32,295 DEBUG [Thread-1984 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc5e114 to 127.0.0.1:55935 2024-12-15T04:40:32,295 DEBUG [Thread-1984 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:32,295 DEBUG [Thread-1986 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e96b8ad to 127.0.0.1:55935 2024-12-15T04:40:32,295 DEBUG [Thread-1986 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:32,297 DEBUG [Thread-1988 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17e5a47d to 127.0.0.1:55935 2024-12-15T04:40:32,297 DEBUG [Thread-1988 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:32,614 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/e2ab77c29baf4c7fb0eb4c47de88b3bf as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/e2ab77c29baf4c7fb0eb4c47de88b3bf 2024-12-15T04:40:32,615 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/d1c56d36a1fc472495c871a2f7253240 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1c56d36a1fc472495c871a2f7253240 2024-12-15T04:40:32,619 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/B of 7c960dc144bb2e403b8a500d7a170ddd into e2ab77c29baf4c7fb0eb4c47de88b3bf(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:32,619 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:32,619 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/A of 7c960dc144bb2e403b8a500d7a170ddd into d1c56d36a1fc472495c871a2f7253240(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:32,619 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/B, priority=12, startTime=1734237632192; duration=0sec 2024-12-15T04:40:32,619 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:32,619 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/A, priority=12, startTime=1734237632192; duration=0sec 2024-12-15T04:40:32,619 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:32,619 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:B 2024-12-15T04:40:32,619 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:32,619 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:A 2024-12-15T04:40:32,619 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 
compacting, 4 eligible, 16 blocking 2024-12-15T04:40:32,620 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49668 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:40:32,620 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 7c960dc144bb2e403b8a500d7a170ddd/C is initiating minor compaction (all files) 2024-12-15T04:40:32,620 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7c960dc144bb2e403b8a500d7a170ddd/C in TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:32,620 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c9adfcf2fe52488eb8fb6d30c6e86524, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/29157fe81dad4ca98b8dbd03df4a9701, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/fb5e92462260489eb4418ee74938e2ff, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/87fbd749b12d4fffb2559b1e6981be07] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp, totalSize=48.5 K 2024-12-15T04:40:32,621 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c9adfcf2fe52488eb8fb6d30c6e86524, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1734237626898 2024-12-15T04:40:32,621 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 29157fe81dad4ca98b8dbd03df4a9701, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1734237627533 2024-12-15T04:40:32,621 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fb5e92462260489eb4418ee74938e2ff, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1734237628691 2024-12-15T04:40:32,622 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 87fbd749b12d4fffb2559b1e6981be07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1734237629821 2024-12-15T04:40:32,630 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7c960dc144bb2e403b8a500d7a170ddd#C#compaction#429 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:32,631 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/333fa2d53c1f4eb58201439f7694dd1f is 50, key is test_row_0/C:col10/1734237629821/Put/seqid=0 2024-12-15T04:40:32,635 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/e93a8f18e3764b97a96855185a709cd7 2024-12-15T04:40:32,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742334_1510 (size=13051) 2024-12-15T04:40:32,639 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/333fa2d53c1f4eb58201439f7694dd1f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/333fa2d53c1f4eb58201439f7694dd1f 2024-12-15T04:40:32,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/3f713d7ce9274acf989e8b4ee7b6ed4a is 50, key is test_row_0/B:col10/1734237630971/Put/seqid=0 2024-12-15T04:40:32,643 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7c960dc144bb2e403b8a500d7a170ddd/C of 7c960dc144bb2e403b8a500d7a170ddd into 333fa2d53c1f4eb58201439f7694dd1f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:32,643 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:32,643 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd., storeName=7c960dc144bb2e403b8a500d7a170ddd/C, priority=12, startTime=1734237632192; duration=0sec 2024-12-15T04:40:32,643 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:32,643 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7c960dc144bb2e403b8a500d7a170ddd:C 2024-12-15T04:40:32,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742335_1511 (size=12301) 2024-12-15T04:40:33,046 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/3f713d7ce9274acf989e8b4ee7b6ed4a 2024-12-15T04:40:33,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/5434625a4b7d4564b1262e0f4db5cd2c is 50, key is test_row_0/C:col10/1734237630971/Put/seqid=0 2024-12-15T04:40:33,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742336_1512 (size=12301) 2024-12-15T04:40:33,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:33,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
as already flushing 2024-12-15T04:40:33,093 DEBUG [Thread-1977 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:55935 2024-12-15T04:40:33,093 DEBUG [Thread-1977 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:33,094 DEBUG [Thread-1975 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:55935 2024-12-15T04:40:33,094 DEBUG [Thread-1975 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:33,099 DEBUG [Thread-1973 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b7f20c4 to 127.0.0.1:55935 2024-12-15T04:40:33,099 DEBUG [Thread-1973 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:33,102 DEBUG [Thread-1969 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5871c039 to 127.0.0.1:55935 2024-12-15T04:40:33,102 DEBUG [Thread-1969 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:33,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-15T04:40:33,487 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=329 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/5434625a4b7d4564b1262e0f4db5cd2c 2024-12-15T04:40:33,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/e93a8f18e3764b97a96855185a709cd7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e93a8f18e3764b97a96855185a709cd7 2024-12-15T04:40:33,501 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e93a8f18e3764b97a96855185a709cd7, entries=150, sequenceid=329, filesize=12.0 K 2024-12-15T04:40:33,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/3f713d7ce9274acf989e8b4ee7b6ed4a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3f713d7ce9274acf989e8b4ee7b6ed4a 2024-12-15T04:40:33,508 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3f713d7ce9274acf989e8b4ee7b6ed4a, entries=150, sequenceid=329, filesize=12.0 K 2024-12-15T04:40:33,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/5434625a4b7d4564b1262e0f4db5cd2c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5434625a4b7d4564b1262e0f4db5cd2c 2024-12-15T04:40:33,513 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5434625a4b7d4564b1262e0f4db5cd2c, entries=150, sequenceid=329, filesize=12.0 K 2024-12-15T04:40:33,514 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=26.84 KB/27480 for 7c960dc144bb2e403b8a500d7a170ddd in 1287ms, sequenceid=329, compaction requested=false 2024-12-15T04:40:33,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:33,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:33,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-12-15T04:40:33,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-12-15T04:40:33,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-12-15T04:40:33,517 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5050 sec 2024-12-15T04:40:33,518 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 2.5090 sec 2024-12-15T04:40:35,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-12-15T04:40:35,115 INFO [Thread-1979 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-12-15T04:40:36,962 DEBUG [Thread-1971 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7daa5922 to 127.0.0.1:55935 2024-12-15T04:40:36,962 DEBUG [Thread-1971 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 47 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 63 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3495 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10485 rows 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3519 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10557 rows 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3505 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10515 rows 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3500 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10500 rows 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 3508 2024-12-15T04:40:36,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 10524 rows 2024-12-15T04:40:36,963 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T04:40:36,963 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7d0ab200 to 127.0.0.1:55935 2024-12-15T04:40:36,963 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:40:36,966 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-15T04:40:36,966 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-15T04:40:36,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:36,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-15T04:40:36,968 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237636968"}]},"ts":"1734237636968"} 2024-12-15T04:40:36,969 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-15T04:40:37,020 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-15T04:40:37,021 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:40:37,023 INFO [PEWorker-1 
{}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7c960dc144bb2e403b8a500d7a170ddd, UNASSIGN}] 2024-12-15T04:40:37,024 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7c960dc144bb2e403b8a500d7a170ddd, UNASSIGN 2024-12-15T04:40:37,025 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=7c960dc144bb2e403b8a500d7a170ddd, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:37,026 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:40:37,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; CloseRegionProcedure 7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:40:37,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-15T04:40:37,178 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:37,179 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(124): Close 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:37,179 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:40:37,179 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1681): Closing 7c960dc144bb2e403b8a500d7a170ddd, disabling compactions & flushes 2024-12-15T04:40:37,179 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:37,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:37,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. after waiting 0 ms 2024-12-15T04:40:37,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 
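The chain above (DisableTableProcedure pid=140 → CloseTableRegionsProcedure pid=141 → TransitRegionStateProcedure/UNASSIGN pid=142 → CloseRegionProcedure pid=143) is driven entirely by the master once a client asks for the table to be disabled. A minimal, hypothetical sketch of that client-side call follows; the connection setup mirrors the flush sketch earlier and is an assumption, not part of this test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        // Blocks until the DisableTableProcedure finishes; each region is
        // unassigned and, as the records below show, flushed during its close.
        admin.disableTable(table);
      }
    }
  }
}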
2024-12-15T04:40:37,180 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(2837): Flushing 7c960dc144bb2e403b8a500d7a170ddd 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-12-15T04:40:37,180 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=A 2024-12-15T04:40:37,181 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:37,181 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=B 2024-12-15T04:40:37,181 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:37,181 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7c960dc144bb2e403b8a500d7a170ddd, store=C 2024-12-15T04:40:37,181 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:37,188 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/908afcb3703042b4bc05ff373cc9f4bd is 50, key is test_row_0/A:col10/1734237633102/Put/seqid=0 2024-12-15T04:40:37,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742337_1513 (size=9857) 2024-12-15T04:40:37,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-15T04:40:37,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-15T04:40:37,595 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/908afcb3703042b4bc05ff373cc9f4bd 2024-12-15T04:40:37,607 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/9352ff89846c41c9a01e6ebd838c88c9 is 50, key is test_row_0/B:col10/1734237633102/Put/seqid=0 2024-12-15T04:40:37,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742338_1514 (size=9857) 2024-12-15T04:40:38,014 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 
{event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/9352ff89846c41c9a01e6ebd838c88c9 2024-12-15T04:40:38,026 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/a4548ec9a82847b1872dfdf4f5dde5f0 is 50, key is test_row_0/C:col10/1734237633102/Put/seqid=0 2024-12-15T04:40:38,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742339_1515 (size=9857) 2024-12-15T04:40:38,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-15T04:40:38,433 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=340 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/a4548ec9a82847b1872dfdf4f5dde5f0 2024-12-15T04:40:38,457 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/A/908afcb3703042b4bc05ff373cc9f4bd as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/908afcb3703042b4bc05ff373cc9f4bd 2024-12-15T04:40:38,463 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/908afcb3703042b4bc05ff373cc9f4bd, entries=100, sequenceid=340, filesize=9.6 K 2024-12-15T04:40:38,464 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/B/9352ff89846c41c9a01e6ebd838c88c9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/9352ff89846c41c9a01e6ebd838c88c9 2024-12-15T04:40:38,468 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/9352ff89846c41c9a01e6ebd838c88c9, entries=100, sequenceid=340, filesize=9.6 K 2024-12-15T04:40:38,469 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/.tmp/C/a4548ec9a82847b1872dfdf4f5dde5f0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/a4548ec9a82847b1872dfdf4f5dde5f0 2024-12-15T04:40:38,472 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/a4548ec9a82847b1872dfdf4f5dde5f0, entries=100, sequenceid=340, filesize=9.6 K 2024-12-15T04:40:38,473 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 7c960dc144bb2e403b8a500d7a170ddd in 1293ms, sequenceid=340, compaction requested=true 2024-12-15T04:40:38,474 DEBUG [StoreCloser-TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d17a4380c41043cfb638f3c65db7a4f2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5b7662b31d854001ad33f62d109c82b3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/3e79ee560b6a492a80ded4345d90c6e4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/924c26784a36415aa1ac9088b9635a03, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/b2bc4b2d7fec4253b460979562376da8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/9a6570a211a046beae853e9f048102a3, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/6690d1336cba4655a52ebafc61009cee, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1cd6b3e895c441eaff999917ecf4802, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e3c9a6702f5b43baaba81f9ab13672a5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/21d458966fd74c87bb19684d9881102f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/090ccd24cde249bb8b03dd58445076a7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/c32cb4fcdc6441ce957c5e61cb179e83, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/02962749ca844cb59394440372ffc202, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/428b89d07f2a40a2b8a725ed086b0f17, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/f4b4f3ee48114ac0ab7e95d2c06aecaa, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d6aa7376bb3f4f158a53e10597c72a00, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/1c28411a9d3d4b568ff9224457d59e55, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5cb5c44685b0473ca9c59054bc6297f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/bff21cbbe6b049d19a50914d3aaca90f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/2b2e5d861f484f63b8533b77bbd353fe, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/670d3e60ca954600a9ab3d131d04743e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/430e23bb992a4da289369493d56eee59] to archive 2024-12-15T04:40:38,475 DEBUG [StoreCloser-TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
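The StoreCloser records above and below show that compacted store files are not deleted at region close; HFileArchiver moves them into an archive tree that mirrors data/<namespace>/<table>/<region>/<family>. Purely to illustrate that layout, here is a small sketch that lists one such archive directory with the Hadoop FileSystem API; the path argument is a placeholder, not a value taken from this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchivedStoreFiles {
  public static void main(String[] args) throws Exception {
    // e.g. hdfs://<namenode>/<hbase-root>/archive/data/default/TestAcidGuarantees/<region>/A
    Path archiveDir = new Path(args[0]);
    FileSystem fs = archiveDir.getFileSystem(new Configuration());
    for (FileStatus file : fs.listStatus(archiveDir)) {
      System.out.println(file.getPath().getName() + "\t" + file.getLen() + " bytes");
    }
  }
}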
2024-12-15T04:40:38,477 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/b2bc4b2d7fec4253b460979562376da8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/b2bc4b2d7fec4253b460979562376da8 2024-12-15T04:40:38,477 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5b7662b31d854001ad33f62d109c82b3 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5b7662b31d854001ad33f62d109c82b3 2024-12-15T04:40:38,477 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/6690d1336cba4655a52ebafc61009cee to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/6690d1336cba4655a52ebafc61009cee 2024-12-15T04:40:38,477 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d17a4380c41043cfb638f3c65db7a4f2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d17a4380c41043cfb638f3c65db7a4f2 2024-12-15T04:40:38,477 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/9a6570a211a046beae853e9f048102a3 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/9a6570a211a046beae853e9f048102a3 2024-12-15T04:40:38,477 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/924c26784a36415aa1ac9088b9635a03 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/924c26784a36415aa1ac9088b9635a03 2024-12-15T04:40:38,477 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1cd6b3e895c441eaff999917ecf4802 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1cd6b3e895c441eaff999917ecf4802 2024-12-15T04:40:38,477 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/3e79ee560b6a492a80ded4345d90c6e4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/3e79ee560b6a492a80ded4345d90c6e4 2024-12-15T04:40:38,478 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/090ccd24cde249bb8b03dd58445076a7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/090ccd24cde249bb8b03dd58445076a7 2024-12-15T04:40:38,479 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/21d458966fd74c87bb19684d9881102f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/21d458966fd74c87bb19684d9881102f 2024-12-15T04:40:38,479 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e3c9a6702f5b43baaba81f9ab13672a5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e3c9a6702f5b43baaba81f9ab13672a5 2024-12-15T04:40:38,479 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/c32cb4fcdc6441ce957c5e61cb179e83 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/c32cb4fcdc6441ce957c5e61cb179e83 2024-12-15T04:40:38,479 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/f4b4f3ee48114ac0ab7e95d2c06aecaa to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/f4b4f3ee48114ac0ab7e95d2c06aecaa 2024-12-15T04:40:38,479 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/428b89d07f2a40a2b8a725ed086b0f17 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/428b89d07f2a40a2b8a725ed086b0f17 2024-12-15T04:40:38,479 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/02962749ca844cb59394440372ffc202 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/02962749ca844cb59394440372ffc202 2024-12-15T04:40:38,479 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d6aa7376bb3f4f158a53e10597c72a00 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d6aa7376bb3f4f158a53e10597c72a00 2024-12-15T04:40:38,480 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5cb5c44685b0473ca9c59054bc6297f1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/5cb5c44685b0473ca9c59054bc6297f1 2024-12-15T04:40:38,480 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/bff21cbbe6b049d19a50914d3aaca90f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/bff21cbbe6b049d19a50914d3aaca90f 2024-12-15T04:40:38,480 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/1c28411a9d3d4b568ff9224457d59e55 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/1c28411a9d3d4b568ff9224457d59e55 2024-12-15T04:40:38,481 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/2b2e5d861f484f63b8533b77bbd353fe to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/2b2e5d861f484f63b8533b77bbd353fe 2024-12-15T04:40:38,481 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/430e23bb992a4da289369493d56eee59 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/430e23bb992a4da289369493d56eee59 2024-12-15T04:40:38,481 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/670d3e60ca954600a9ab3d131d04743e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/670d3e60ca954600a9ab3d131d04743e 2024-12-15T04:40:38,482 DEBUG [StoreCloser-TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/76e3fe40a08944a3ab13255a8e45efb5, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c979d87689d14c68a3dc740bdedeb2a0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/dac95dc0f7134971aa20277180caf5c2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/49bc2cc2fdf64c07b12dc0c72d51bddc, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/471bbd0f685044ef942a8ce5aef5f6d4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/96702e6038ea44bb9bec2f3b41919bf2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ab27b8f51cf469fb53b8d453c3d0fd1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/43b450bc7b3d4406a7c82a68a2fd0d3f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/ed4c7ec331cf48a9905f7ffac00c7bcf, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/1da02946a6404d5cb53ccb29b5a3d127, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/70a762830f2b4553b74894a4213fc872, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/bf726a9e440841328fbe51f78e6f0a0f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c77c4ea8ac404322a3db3e0b89b368f6, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d92cb602e1fa4565a0e814306e157f35, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/90d39c5097af4214829ca27aed1ba2d7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6063884dd3f2498c8ad271f0e007c997, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ca9abacf7084d55b0ec9e91378683b2, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d36984c5a6ec4f88bc48431fd0fd70c0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/357fae7d6c3744f28e740e236f32f3e8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d46143b51c4a43c2bb74ef1aee41f860, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3aebed82efe64e59ba38176ceeaf4595, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/26e9fd2de15341acb6417e69758fa3c5] to archive 2024-12-15T04:40:38,483 DEBUG [StoreCloser-TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-15T04:40:38,484 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/76e3fe40a08944a3ab13255a8e45efb5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/76e3fe40a08944a3ab13255a8e45efb5 2024-12-15T04:40:38,484 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/dac95dc0f7134971aa20277180caf5c2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/dac95dc0f7134971aa20277180caf5c2 2024-12-15T04:40:38,484 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/471bbd0f685044ef942a8ce5aef5f6d4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/471bbd0f685044ef942a8ce5aef5f6d4 2024-12-15T04:40:38,484 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c979d87689d14c68a3dc740bdedeb2a0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c979d87689d14c68a3dc740bdedeb2a0 2024-12-15T04:40:38,484 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/43b450bc7b3d4406a7c82a68a2fd0d3f to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/43b450bc7b3d4406a7c82a68a2fd0d3f 2024-12-15T04:40:38,485 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/49bc2cc2fdf64c07b12dc0c72d51bddc to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/49bc2cc2fdf64c07b12dc0c72d51bddc 2024-12-15T04:40:38,485 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/96702e6038ea44bb9bec2f3b41919bf2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/96702e6038ea44bb9bec2f3b41919bf2 2024-12-15T04:40:38,485 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ab27b8f51cf469fb53b8d453c3d0fd1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ab27b8f51cf469fb53b8d453c3d0fd1 2024-12-15T04:40:38,486 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/ed4c7ec331cf48a9905f7ffac00c7bcf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/ed4c7ec331cf48a9905f7ffac00c7bcf 2024-12-15T04:40:38,486 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/bf726a9e440841328fbe51f78e6f0a0f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/bf726a9e440841328fbe51f78e6f0a0f 2024-12-15T04:40:38,486 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/1da02946a6404d5cb53ccb29b5a3d127 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/1da02946a6404d5cb53ccb29b5a3d127 2024-12-15T04:40:38,486 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/70a762830f2b4553b74894a4213fc872 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/70a762830f2b4553b74894a4213fc872 2024-12-15T04:40:38,486 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d92cb602e1fa4565a0e814306e157f35 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d92cb602e1fa4565a0e814306e157f35 2024-12-15T04:40:38,486 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c77c4ea8ac404322a3db3e0b89b368f6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/c77c4ea8ac404322a3db3e0b89b368f6 2024-12-15T04:40:38,486 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/90d39c5097af4214829ca27aed1ba2d7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/90d39c5097af4214829ca27aed1ba2d7 2024-12-15T04:40:38,487 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6063884dd3f2498c8ad271f0e007c997 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6063884dd3f2498c8ad271f0e007c997 2024-12-15T04:40:38,487 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d36984c5a6ec4f88bc48431fd0fd70c0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d36984c5a6ec4f88bc48431fd0fd70c0 2024-12-15T04:40:38,487 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ca9abacf7084d55b0ec9e91378683b2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/6ca9abacf7084d55b0ec9e91378683b2 2024-12-15T04:40:38,487 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/357fae7d6c3744f28e740e236f32f3e8 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/357fae7d6c3744f28e740e236f32f3e8 2024-12-15T04:40:38,487 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d46143b51c4a43c2bb74ef1aee41f860 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/d46143b51c4a43c2bb74ef1aee41f860 2024-12-15T04:40:38,487 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/26e9fd2de15341acb6417e69758fa3c5 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/26e9fd2de15341acb6417e69758fa3c5 2024-12-15T04:40:38,487 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3aebed82efe64e59ba38176ceeaf4595 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3aebed82efe64e59ba38176ceeaf4595 2024-12-15T04:40:38,488 DEBUG [StoreCloser-TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7ae1dd13abc64b6eb472710f12c6a278, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/56ccfc02976f4680a5c2548be3b2a50d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/97c9a5a301534661b3991fa981d94ca8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/00e943de5cb14851a9b0b1a33915b067, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/73d47b5b407e452399aee27cd3cd8081, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5e35eae8fea3424687ed34bea2afe83d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/bc6a8a700a954e6ba80e5cdbb15671b1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f30b772d908d42308ded31b4a25a5445, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7b17604755ce41fda622ecd1f6d0c739, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f97b27c787d240fda1185d8a0a93e5dd, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/388d364c5a2842d6af89c640b73080e7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/46f0f31049e34d2cb8360a4a969131ac, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/4e683221ad9245438be08be39a5a7988, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/0b029b088d294e799578de2a99164c05, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/9f92efa973384d2f852f4b1100efb32a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/33679dd7408145ac87766650ae51fc8a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f5d89eec37094dfaae42796dab643f12, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c9adfcf2fe52488eb8fb6d30c6e86524, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c0895091b0b147f6995fc3c7c78d0ef1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/29157fe81dad4ca98b8dbd03df4a9701, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/fb5e92462260489eb4418ee74938e2ff, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/87fbd749b12d4fffb2559b1e6981be07] to archive 2024-12-15T04:40:38,489 DEBUG [StoreCloser-TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:40:38,490 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/97c9a5a301534661b3991fa981d94ca8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/97c9a5a301534661b3991fa981d94ca8 2024-12-15T04:40:38,490 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7ae1dd13abc64b6eb472710f12c6a278 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7ae1dd13abc64b6eb472710f12c6a278 2024-12-15T04:40:38,490 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/56ccfc02976f4680a5c2548be3b2a50d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/56ccfc02976f4680a5c2548be3b2a50d 2024-12-15T04:40:38,490 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/00e943de5cb14851a9b0b1a33915b067 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/00e943de5cb14851a9b0b1a33915b067 2024-12-15T04:40:38,490 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5e35eae8fea3424687ed34bea2afe83d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5e35eae8fea3424687ed34bea2afe83d 2024-12-15T04:40:38,490 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/73d47b5b407e452399aee27cd3cd8081 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/73d47b5b407e452399aee27cd3cd8081 2024-12-15T04:40:38,490 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/bc6a8a700a954e6ba80e5cdbb15671b1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/bc6a8a700a954e6ba80e5cdbb15671b1 2024-12-15T04:40:38,490 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f30b772d908d42308ded31b4a25a5445 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f30b772d908d42308ded31b4a25a5445 2024-12-15T04:40:38,491 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7b17604755ce41fda622ecd1f6d0c739 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/7b17604755ce41fda622ecd1f6d0c739 2024-12-15T04:40:38,491 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f97b27c787d240fda1185d8a0a93e5dd to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f97b27c787d240fda1185d8a0a93e5dd 2024-12-15T04:40:38,491 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/388d364c5a2842d6af89c640b73080e7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/388d364c5a2842d6af89c640b73080e7 2024-12-15T04:40:38,491 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/4e683221ad9245438be08be39a5a7988 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/4e683221ad9245438be08be39a5a7988 2024-12-15T04:40:38,492 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/46f0f31049e34d2cb8360a4a969131ac to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/46f0f31049e34d2cb8360a4a969131ac 2024-12-15T04:40:38,492 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/33679dd7408145ac87766650ae51fc8a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/33679dd7408145ac87766650ae51fc8a 2024-12-15T04:40:38,492 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/0b029b088d294e799578de2a99164c05 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/0b029b088d294e799578de2a99164c05 2024-12-15T04:40:38,492 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/9f92efa973384d2f852f4b1100efb32a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/9f92efa973384d2f852f4b1100efb32a 2024-12-15T04:40:38,492 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f5d89eec37094dfaae42796dab643f12 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/f5d89eec37094dfaae42796dab643f12 2024-12-15T04:40:38,493 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c9adfcf2fe52488eb8fb6d30c6e86524 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c9adfcf2fe52488eb8fb6d30c6e86524 2024-12-15T04:40:38,493 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c0895091b0b147f6995fc3c7c78d0ef1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/c0895091b0b147f6995fc3c7c78d0ef1 2024-12-15T04:40:38,493 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/fb5e92462260489eb4418ee74938e2ff to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/fb5e92462260489eb4418ee74938e2ff 2024-12-15T04:40:38,493 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/87fbd749b12d4fffb2559b1e6981be07 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/87fbd749b12d4fffb2559b1e6981be07 2024-12-15T04:40:38,493 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/29157fe81dad4ca98b8dbd03df4a9701 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/29157fe81dad4ca98b8dbd03df4a9701 2024-12-15T04:40:38,496 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/recovered.edits/343.seqid, newMaxSeqId=343, maxSeqId=1 2024-12-15T04:40:38,496 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd. 2024-12-15T04:40:38,496 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] regionserver.HRegion(1635): Region close journal for 7c960dc144bb2e403b8a500d7a170ddd: 2024-12-15T04:40:38,498 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=143}] handler.UnassignRegionHandler(170): Closed 7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:38,498 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=142 updating hbase:meta row=7c960dc144bb2e403b8a500d7a170ddd, regionState=CLOSED 2024-12-15T04:40:38,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-12-15T04:40:38,500 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; CloseRegionProcedure 7c960dc144bb2e403b8a500d7a170ddd, server=e56de37b85b3,43199,1734237482035 in 1.4730 sec 2024-12-15T04:40:38,501 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-15T04:40:38,501 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7c960dc144bb2e403b8a500d7a170ddd, UNASSIGN in 1.4770 sec 2024-12-15T04:40:38,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-12-15T04:40:38,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4800 sec 2024-12-15T04:40:38,503 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237638502"}]},"ts":"1734237638502"} 2024-12-15T04:40:38,503 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-15T04:40:38,519 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-15T04:40:38,521 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5540 sec 2024-12-15T04:40:39,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-12-15T04:40:39,076 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-12-15T04:40:39,078 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-15T04:40:39,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:39,081 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=144, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:39,082 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=144, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:39,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-15T04:40:39,084 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:39,087 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/recovered.edits] 2024-12-15T04:40:39,092 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/908afcb3703042b4bc05ff373cc9f4bd to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/908afcb3703042b4bc05ff373cc9f4bd 2024-12-15T04:40:39,092 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1c56d36a1fc472495c871a2f7253240 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/d1c56d36a1fc472495c871a2f7253240 2024-12-15T04:40:39,093 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e93a8f18e3764b97a96855185a709cd7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/A/e93a8f18e3764b97a96855185a709cd7 
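For context: the DISABLE operation for procId 140 above and the DeleteTableProcedure (pid=144) whose HFileArchiver work continues below are the two halves of an ordinary drop-table sequence, with store files moved under the archive directory rather than deleted in place. As a point of reference only, here is a minimal client-side sketch of the calls that would drive such a sequence through the standard HBase 2.x Admin API; the configuration and connection handling are assumed and are not taken from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // DisableTableProcedure: regions are closed, table marked DISABLED
        admin.deleteTable(table);   // DeleteTableProcedure: region dirs archived, then removed from hbase:meta
      }
    }
  }
}

Both calls block until the master reports the corresponding procedure as finished, which is what the repeated "Checking to see if procedure is done" lines from MasterRpcServices reflect.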
2024-12-15T04:40:39,096 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/9352ff89846c41c9a01e6ebd838c88c9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/9352ff89846c41c9a01e6ebd838c88c9 2024-12-15T04:40:39,096 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/e2ab77c29baf4c7fb0eb4c47de88b3bf to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/e2ab77c29baf4c7fb0eb4c47de88b3bf 2024-12-15T04:40:39,096 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3f713d7ce9274acf989e8b4ee7b6ed4a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/B/3f713d7ce9274acf989e8b4ee7b6ed4a 2024-12-15T04:40:39,099 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5434625a4b7d4564b1262e0f4db5cd2c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/5434625a4b7d4564b1262e0f4db5cd2c 2024-12-15T04:40:39,099 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/a4548ec9a82847b1872dfdf4f5dde5f0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/a4548ec9a82847b1872dfdf4f5dde5f0 2024-12-15T04:40:39,099 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/333fa2d53c1f4eb58201439f7694dd1f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/C/333fa2d53c1f4eb58201439f7694dd1f 2024-12-15T04:40:39,102 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/recovered.edits/343.seqid to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd/recovered.edits/343.seqid 2024-12-15T04:40:39,102 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(634): Deleted 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/7c960dc144bb2e403b8a500d7a170ddd 2024-12-15T04:40:39,102 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-15T04:40:39,105 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=144, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:39,106 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-15T04:40:39,108 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-15T04:40:39,109 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=144, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:39,109 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 2024-12-15T04:40:39,109 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734237639109"}]},"ts":"9223372036854775807"} 2024-12-15T04:40:39,110 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-15T04:40:39,110 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7c960dc144bb2e403b8a500d7a170ddd, NAME => 'TestAcidGuarantees,,1734237610003.7c960dc144bb2e403b8a500d7a170ddd.', STARTKEY => '', ENDKEY => ''}] 2024-12-15T04:40:39,110 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-15T04:40:39,110 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734237639110"}]},"ts":"9223372036854775807"} 2024-12-15T04:40:39,111 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-15T04:40:39,154 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=144, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:39,154 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 76 msec 2024-12-15T04:40:39,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-15T04:40:39,184 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-12-15T04:40:39,191 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testScanAtomicity Thread=244 (was 244), OpenFileDescriptor=449 (was 446) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=315 (was 302) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4470 (was 4484) 2024-12-15T04:40:39,199 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=315, ProcessCount=11, AvailableMemoryMB=4470 2024-12-15T04:40:39,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-15T04:40:39,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:40:39,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:39,203 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-12-15T04:40:39,203 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:39,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 145 2024-12-15T04:40:39,203 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-15T04:40:39,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-15T04:40:39,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742340_1516 (size=963) 2024-12-15T04:40:39,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-15T04:40:39,507 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-15T04:40:39,615 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9 2024-12-15T04:40:39,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742341_1517 (size=53) 2024-12-15T04:40:39,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-15T04:40:40,027 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:40:40,027 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3aea4ed87c2ca56ea7b1d05fdf98762f, disabling compactions & flushes 2024-12-15T04:40:40,027 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:40,027 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:40,027 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. after waiting 0 ms 2024-12-15T04:40:40,027 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:40,027 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
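The CreateTableProcedure stored as pid=145 above builds the table from the descriptor printed in the master log: the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE' plus three identical column families A, B and C with a single version, ROW bloom filters and 64 KB blocks. A hedged sketch of assembling an equivalent descriptor with the public HBase 2.x builder API follows; it is an illustration of the logged settings, not the test's own code, and omits any options the log does not print.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class AcidTableDescriptorSketch {
  static TableDescriptor build() {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level metadata seen in the log: ADAPTIVE in-memory compaction for all stores
            .setValue("hbase.hregion.compacting.memstore.type", "ADAPTIVE");
    for (String family : new String[] {"A", "B", "C"}) {
      ColumnFamilyDescriptor cf =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
              .build();
      table.setColumnFamily(cf);
    }
    return table.build();
  }
}

Passing the resulting descriptor to Admin.createTable would store a CreateTableProcedure like the one above.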
2024-12-15T04:40:40,027 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:40,030 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-12-15T04:40:40,030 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1734237640030"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1734237640030"}]},"ts":"1734237640030"} 2024-12-15T04:40:40,033 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-12-15T04:40:40,033 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-15T04:40:40,034 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237640034"}]},"ts":"1734237640034"} 2024-12-15T04:40:40,035 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-12-15T04:40:40,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, ASSIGN}] 2024-12-15T04:40:40,063 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, ASSIGN 2024-12-15T04:40:40,064 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, ASSIGN; state=OFFLINE, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=false 2024-12-15T04:40:40,215 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:40,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; OpenRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:40:40,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-15T04:40:40,370 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:40,377 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
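The Put records into hbase:meta above (the 'regioninfo' and 'state' qualifiers) track the single region 3aea4ed87c2ca56ea7b1d05fdf98762f as CREATE_TABLE_ADD_TO_META and CREATE_TABLE_ASSIGN_REGIONS run. Once assignment finishes, the same layout is visible from a client; a small assumed sketch using the standard Admin.getRegions call, purely for orientation:

import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // For this test table there is exactly one region spanning the whole key space.
      List<RegionInfo> regions = admin.getRegions(TableName.valueOf("TestAcidGuarantees"));
      for (RegionInfo ri : regions) {
        System.out.println(ri.getEncodedName() + " -> " + ri.getRegionNameAsString());
      }
    }
  }
}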
2024-12-15T04:40:40,377 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7285): Opening region: {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:40:40,378 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,378 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:40:40,378 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7327): checking encryption for 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,379 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7330): checking classloading for 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,381 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,383 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:40,383 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aea4ed87c2ca56ea7b1d05fdf98762f columnFamilyName A 2024-12-15T04:40:40,383 DEBUG [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:40,383 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(327): Store=3aea4ed87c2ca56ea7b1d05fdf98762f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:40,383 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,384 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:40,384 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aea4ed87c2ca56ea7b1d05fdf98762f columnFamilyName B 2024-12-15T04:40:40,384 DEBUG [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:40,385 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(327): Store=3aea4ed87c2ca56ea7b1d05fdf98762f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:40,385 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,386 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:40,386 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aea4ed87c2ca56ea7b1d05fdf98762f columnFamilyName C 2024-12-15T04:40:40,386 DEBUG [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:40,386 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(327): Store=3aea4ed87c2ca56ea7b1d05fdf98762f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:40,386 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:40,387 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,387 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,389 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:40:40,390 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1085): writing seq id for 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:40,391 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-15T04:40:40,392 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1102): Opened 3aea4ed87c2ca56ea7b1d05fdf98762f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64766028, jitterRate=-0.03491097688674927}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:40:40,392 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1001): Region open journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:40,393 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., pid=147, masterSystemTime=1734237640370 2024-12-15T04:40:40,394 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:40,394 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
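Each store above is opened as a CompactingMemStore with the ADAPTIVE compactor, which this test requests through the table-wide 'hbase.hregion.compacting.memstore.type' attribute. The same policy can also be requested per column family through the public builder API; the following is a hedged sketch of that alternative, not what the test itself does.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class AdaptiveMemStoreSketch {
  // Per-family alternative to the table-wide ADAPTIVE attribute used by this test.
  static ColumnFamilyDescriptor adaptiveFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.ADAPTIVE)
        .build();
  }
}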
2024-12-15T04:40:40,394 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=OPEN, openSeqNum=2, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:40,395 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35185 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=e56de37b85b3,43199,1734237482035, table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-15T04:40:40,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-12-15T04:40:40,396 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; OpenRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 in 178 msec 2024-12-15T04:40:40,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-12-15T04:40:40,398 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, ASSIGN in 335 msec 2024-12-15T04:40:40,399 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-15T04:40:40,399 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237640399"}]},"ts":"1734237640399"} 2024-12-15T04:40:40,400 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-12-15T04:40:40,435 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=145, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-12-15T04:40:40,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2340 sec 2024-12-15T04:40:41,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=145 2024-12-15T04:40:41,314 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 145 completed 2024-12-15T04:40:41,317 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b5141 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@103dfc6e 2024-12-15T04:40:41,378 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:41,380 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:41,381 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:41,382 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-15T04:40:41,383 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51288, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-15T04:40:41,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-12-15T04:40:41,385 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'ADAPTIVE', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-15T04:40:41,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-12-15T04:40:41,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to 
blk_1073742342_1518 (size=999) 2024-12-15T04:40:41,802 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.963 2024-12-15T04:40:41,802 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated tableinfo=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.999 2024-12-15T04:40:41,807 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:40:41,810 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, REOPEN/MOVE}] 2024-12-15T04:40:41,811 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, REOPEN/MOVE 2024-12-15T04:40:41,812 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:41,813 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:40:41,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; CloseRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:40:41,965 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:41,966 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(124): Close 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:41,966 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:40:41,966 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1681): Closing 3aea4ed87c2ca56ea7b1d05fdf98762f, disabling compactions & flushes 2024-12-15T04:40:41,966 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:41,966 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:41,966 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
after waiting 0 ms 2024-12-15T04:40:41,966 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:41,975 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-12-15T04:40:41,976 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:41,976 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1635): Region close journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:41,976 WARN [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionServer(3786): Not adding moved region record: 3aea4ed87c2ca56ea7b1d05fdf98762f to self. 2024-12-15T04:40:41,978 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(170): Closed 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:41,979 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=CLOSED 2024-12-15T04:40:41,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-12-15T04:40:41,982 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 in 167 msec 2024-12-15T04:40:41,982 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, REOPEN/MOVE; state=CLOSED, location=e56de37b85b3,43199,1734237482035; forceNewPlan=false, retain=true 2024-12-15T04:40:42,133 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=OPENING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=150, state=RUNNABLE; OpenRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:40:42,285 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,291 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
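The modify-table request logged at 04:40:41,385 changes only column family A, adding IS_MOB => 'true' and MOB_THRESHOLD => '4'; that is why the ModifyTableProcedure (pid=148) then closes and reopens the region above. A minimal sketch of the equivalent change through the public HBase 2.x Admin API is shown below, assuming standard client calls; the test itself submits the full modified table descriptor rather than a single family.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ColumnFamilyDescriptor current =
          admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(current)
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4': values larger than 4 bytes are written as MOB
          .build();
      // Triggers a modify procedure and a reopen of the table's regions, as in the log above.
      admin.modifyColumnFamily(table, mobA);
    }
  }
}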
2024-12-15T04:40:42,291 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7285): Opening region: {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} 2024-12-15T04:40:42,292 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,292 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-15T04:40:42,292 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7327): checking encryption for 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,292 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(7330): checking classloading for 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,295 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,296 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:42,297 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aea4ed87c2ca56ea7b1d05fdf98762f columnFamilyName A 2024-12-15T04:40:42,299 DEBUG [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:42,300 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(327): Store=3aea4ed87c2ca56ea7b1d05fdf98762f/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:42,300 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,301 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:42,301 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aea4ed87c2ca56ea7b1d05fdf98762f columnFamilyName B 2024-12-15T04:40:42,301 DEBUG [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:42,302 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(327): Store=3aea4ed87c2ca56ea7b1d05fdf98762f/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:42,302 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,303 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=ADAPTIVE, pipelineThreshold=2, compactionCellMax=10 2024-12-15T04:40:42,303 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3aea4ed87c2ca56ea7b1d05fdf98762f columnFamilyName C 2024-12-15T04:40:42,303 DEBUG [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:42,304 INFO [StoreOpener-3aea4ed87c2ca56ea7b1d05fdf98762f-1 {}] regionserver.HStore(327): Store=3aea4ed87c2ca56ea7b1d05fdf98762f/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-15T04:40:42,304 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,305 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,306 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,308 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-12-15T04:40:42,309 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1085): writing seq id for 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,310 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1102): Opened 3aea4ed87c2ca56ea7b1d05fdf98762f; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73993244, jitterRate=0.10258525609970093}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-12-15T04:40:42,311 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegion(1001): Region open journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:42,311 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., pid=152, masterSystemTime=1734237642285 2024-12-15T04:40:42,313 DEBUG [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,313 INFO [RS_OPEN_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_OPEN_REGION, pid=152}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
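The burst of ReadOnlyZKClient / AbstractRpcClient lines that follows is consistent with the test opening a separate client Connection for each concurrent worker it is about to start, before requesting the flush. A hypothetical sketch of one such worker's setup is below; the row, family and qualifier names are illustrative only and do not come from the test source.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WorkerConnectionSketch {
  public static void main(String[] args) throws Exception {
    // Each worker holding its own Connection yields one ZooKeeper session and one RPC client per worker,
    // matching the repeated ReadOnlyZKClient/AbstractRpcClient lines in the log.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("q0"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}

The FlushTableProcedure stored further down (pid=153) is the server-side counterpart of an Admin.flush(TableName) request.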
2024-12-15T04:40:42,313 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=OPEN, openSeqNum=5, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,315 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=150 2024-12-15T04:40:42,315 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=150, state=SUCCESS; OpenRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 in 180 msec 2024-12-15T04:40:42,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-15T04:40:42,317 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, REOPEN/MOVE in 505 msec 2024-12-15T04:40:42,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-12-15T04:40:42,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 510 msec 2024-12-15T04:40:42,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 934 msec 2024-12-15T04:40:42,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-12-15T04:40:42,324 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11a52cdf to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e047c09 2024-12-15T04:40:42,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11030ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,363 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d7fe431 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60d631a3 2024-12-15T04:40:42,377 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abefea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,377 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x091d72db to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58971172 2024-12-15T04:40:42,387 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e757135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,387 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x5d836f78 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-12-15T04:40:42,395 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,395 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-12-15T04:40:42,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,404 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-12-15T04:40:42,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,413 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e8cd1ae to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bb75907 2024-12-15T04:40:42,420 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c2838a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,421 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d832d43 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c1d3a95 2024-12-15T04:40:42,429 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50bf224f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,429 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x15b6349f to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@503a7d2e 2024-12-15T04:40:42,437 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79be903c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,437 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x439b60d5 to 127.0.0.1:55935 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@404bb685 2024-12-15T04:40:42,445 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d79f1c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-15T04:40:42,449 DEBUG [hconnection-0x73f6d181-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,449 DEBUG [hconnection-0x2b43f9e7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,450 DEBUG [hconnection-0x4aa30080-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,450 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:42,450 DEBUG [hconnection-0x4f15b829-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,451 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees 2024-12-15T04:40:42,451 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33482, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,451 DEBUG [hconnection-0x3710ff29-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,452 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33490, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,452 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:42,452 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,453 DEBUG [hconnection-0x5f47a45c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,453 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=153, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:42,453 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, 
state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:42,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-15T04:40:42,454 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33504, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,454 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33502, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,456 DEBUG [hconnection-0x409933db-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,456 DEBUG [hconnection-0x3492323e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,457 DEBUG [hconnection-0x20933d29-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,457 DEBUG [hconnection-0x667cc0aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-15T04:40:42,458 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33516, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,458 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33524, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,458 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33526, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,459 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33530, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-15T04:40:42,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,459 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:40:42,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:42,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:42,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:42,460 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:42,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:42,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:42,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237702470, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237702471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237702471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237702472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237702472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,484 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121564b7e3fcf9a94248829dd9d21ff8c8e6_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237642459/Put/seqid=0 2024-12-15T04:40:42,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742343_1519 (size=12154) 2024-12-15T04:40:42,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-15T04:40:42,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237702573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237702573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,575 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237702574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237702574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,575 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237702574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,605 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:42,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:42,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,606 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:42,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:42,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:42,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-15T04:40:42,757 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:42,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:42,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,758 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:42,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:42,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:42,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237702776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,777 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237702776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237702776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237702776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237702777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,889 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:42,891 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121564b7e3fcf9a94248829dd9d21ff8c8e6_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121564b7e3fcf9a94248829dd9d21ff8c8e6_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:42,892 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/19629d6e5f0745b982c56278bfc22ba8, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:42,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/19629d6e5f0745b982c56278bfc22ba8 is 175, key is test_row_0/A:col10/1734237642459/Put/seqid=0 2024-12-15T04:40:42,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742344_1520 (size=30955) 2024-12-15T04:40:42,909 DEBUG [RSProcedureDispatcher-pool-1 {}] 
master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:42,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:42,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:42,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:42,910 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:42,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:42,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-15T04:40:43,061 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,062 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:43,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:43,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:43,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,062 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:43,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237703077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,079 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237703079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237703079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237703079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237703079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,214 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:43,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:43,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,214 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,295 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/19629d6e5f0745b982c56278bfc22ba8 2024-12-15T04:40:43,314 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/6895276a7a4746188213579e62ab602f is 50, key is test_row_0/B:col10/1734237642459/Put/seqid=0 2024-12-15T04:40:43,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742345_1521 (size=12001) 2024-12-15T04:40:43,365 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,366 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:43,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
as already flushing 2024-12-15T04:40:43,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,517 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:43,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:43,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-15T04:40:43,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237703580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237703580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237703581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237703584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237703585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,670 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:43,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:43,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,670 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/6895276a7a4746188213579e62ab602f 2024-12-15T04:40:43,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/0197c48f505b4cd69398a627bf3c90be is 50, key is test_row_0/C:col10/1734237642459/Put/seqid=0 2024-12-15T04:40:43,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742346_1522 (size=12001) 2024-12-15T04:40:43,822 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,822 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:43,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
as already flushing 2024-12-15T04:40:43,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,822 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,974 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:43,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:43,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:43,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:43,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:43,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:44,014 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-12-15T04:40:44,126 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,126 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:44,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:44,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:44,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:44,127 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] handler.RSProcedureHandler(58): pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:44,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=154 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:44,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=154 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:44,143 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/0197c48f505b4cd69398a627bf3c90be 2024-12-15T04:40:44,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/19629d6e5f0745b982c56278bfc22ba8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/19629d6e5f0745b982c56278bfc22ba8 2024-12-15T04:40:44,152 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/19629d6e5f0745b982c56278bfc22ba8, entries=150, sequenceid=17, filesize=30.2 K 2024-12-15T04:40:44,153 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/6895276a7a4746188213579e62ab602f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/6895276a7a4746188213579e62ab602f 2024-12-15T04:40:44,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/6895276a7a4746188213579e62ab602f, entries=150, sequenceid=17, 
filesize=11.7 K 2024-12-15T04:40:44,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/0197c48f505b4cd69398a627bf3c90be as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/0197c48f505b4cd69398a627bf3c90be 2024-12-15T04:40:44,159 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/0197c48f505b4cd69398a627bf3c90be, entries=150, sequenceid=17, filesize=11.7 K 2024-12-15T04:40:44,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1700ms, sequenceid=17, compaction requested=false 2024-12-15T04:40:44,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:44,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,279 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=154 2024-12-15T04:40:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:44,279 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:40:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:44,279 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:44,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e5422355273d42a986db12a9ccea1ef4_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237642469/Put/seqid=0 2024-12-15T04:40:44,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742347_1523 (size=12154) 2024-12-15T04:40:44,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:44,296 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e5422355273d42a986db12a9ccea1ef4_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e5422355273d42a986db12a9ccea1ef4_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:44,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/350e5bdb29e94244bec86a2c3e7a40b9, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:44,297 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/350e5bdb29e94244bec86a2c3e7a40b9 is 175, key is test_row_0/A:col10/1734237642469/Put/seqid=0 2024-12-15T04:40:44,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742348_1524 (size=30955) 2024-12-15T04:40:44,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-15T04:40:44,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:44,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237704592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237704593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237704593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237704594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237704595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237704696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237704696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237704696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237704697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237704702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,705 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/350e5bdb29e94244bec86a2c3e7a40b9 2024-12-15T04:40:44,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/897f9a58de21497b957ffc056ad886bb is 50, key is test_row_0/B:col10/1734237642469/Put/seqid=0 2024-12-15T04:40:44,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742349_1525 (size=12001) 2024-12-15T04:40:44,899 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237704898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237704898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237704899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237704900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:44,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:44,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237704905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,114 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/897f9a58de21497b957ffc056ad886bb 2024-12-15T04:40:45,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/3d75704c556b4498b6f300250cb3a771 is 50, key is test_row_0/C:col10/1734237642469/Put/seqid=0 2024-12-15T04:40:45,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742350_1526 (size=12001) 2024-12-15T04:40:45,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237705201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237705202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237705202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237705203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237705207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,523 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/3d75704c556b4498b6f300250cb3a771 2024-12-15T04:40:45,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/350e5bdb29e94244bec86a2c3e7a40b9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/350e5bdb29e94244bec86a2c3e7a40b9 2024-12-15T04:40:45,528 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/350e5bdb29e94244bec86a2c3e7a40b9, entries=150, sequenceid=40, filesize=30.2 K 2024-12-15T04:40:45,529 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/897f9a58de21497b957ffc056ad886bb as 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/897f9a58de21497b957ffc056ad886bb 2024-12-15T04:40:45,531 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/897f9a58de21497b957ffc056ad886bb, entries=150, sequenceid=40, filesize=11.7 K 2024-12-15T04:40:45,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/3d75704c556b4498b6f300250cb3a771 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3d75704c556b4498b6f300250cb3a771 2024-12-15T04:40:45,534 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3d75704c556b4498b6f300250cb3a771, entries=150, sequenceid=40, filesize=11.7 K 2024-12-15T04:40:45,535 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1256ms, sequenceid=40, compaction requested=false 2024-12-15T04:40:45,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:45,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:45,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-15T04:40:45,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-15T04:40:45,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-12-15T04:40:45,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0830 sec 2024-12-15T04:40:45,538 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=153, table=TestAcidGuarantees in 3.0870 sec 2024-12-15T04:40:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:45,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-15T04:40:45,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:45,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:45,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:45,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:45,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:45,706 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:45,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215b56a9bb4418d44999df03c89e0e8a467_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237645704/Put/seqid=0 2024-12-15T04:40:45,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742351_1527 (size=12154) 2024-12-15T04:40:45,715 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:45,718 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215b56a9bb4418d44999df03c89e0e8a467_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b56a9bb4418d44999df03c89e0e8a467_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:45,718 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/740bf57f0e974be3898e91b6e504836b, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:45,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/740bf57f0e974be3898e91b6e504836b is 175, key is test_row_0/A:col10/1734237645704/Put/seqid=0 2024-12-15T04:40:45,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237705718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237705720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237705720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237705721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237705721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742352_1528 (size=30955) 2024-12-15T04:40:45,733 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/740bf57f0e974be3898e91b6e504836b 2024-12-15T04:40:45,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/766b77ceacc743e98e1bb147958d525a is 50, key is test_row_0/B:col10/1734237645704/Put/seqid=0 2024-12-15T04:40:45,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742353_1529 (size=12001) 2024-12-15T04:40:45,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/766b77ceacc743e98e1bb147958d525a 2024-12-15T04:40:45,756 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/563284641da84f60ab04c580ccee99a7 is 50, key is test_row_0/C:col10/1734237645704/Put/seqid=0 2024-12-15T04:40:45,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742354_1530 (size=12001) 2024-12-15T04:40:45,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/563284641da84f60ab04c580ccee99a7 2024-12-15T04:40:45,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/740bf57f0e974be3898e91b6e504836b as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/740bf57f0e974be3898e91b6e504836b 2024-12-15T04:40:45,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/740bf57f0e974be3898e91b6e504836b, entries=150, sequenceid=54, filesize=30.2 K 2024-12-15T04:40:45,767 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/766b77ceacc743e98e1bb147958d525a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/766b77ceacc743e98e1bb147958d525a 2024-12-15T04:40:45,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/766b77ceacc743e98e1bb147958d525a, entries=150, sequenceid=54, filesize=11.7 K 2024-12-15T04:40:45,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/563284641da84f60ab04c580ccee99a7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/563284641da84f60ab04c580ccee99a7 2024-12-15T04:40:45,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/563284641da84f60ab04c580ccee99a7, entries=150, sequenceid=54, filesize=11.7 K 2024-12-15T04:40:45,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 72ms, sequenceid=54, compaction requested=true 2024-12-15T04:40:45,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:45,778 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:45,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:45,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:45,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:B, priority=-2147483648, current under compaction store size 
is 2 2024-12-15T04:40:45,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:45,778 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:45,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:45,778 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/B is initiating minor compaction (all files) 2024-12-15T04:40:45,780 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/B in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:45,780 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/6895276a7a4746188213579e62ab602f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/897f9a58de21497b957ffc056ad886bb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/766b77ceacc743e98e1bb147958d525a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=35.2 K 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/A is initiating minor compaction (all files) 2024-12-15T04:40:45,780 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/A in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:45,780 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/19629d6e5f0745b982c56278bfc22ba8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/350e5bdb29e94244bec86a2c3e7a40b9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/740bf57f0e974be3898e91b6e504836b] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=90.7 K 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6895276a7a4746188213579e62ab602f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734237642456 2024-12-15T04:40:45,780 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/19629d6e5f0745b982c56278bfc22ba8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/350e5bdb29e94244bec86a2c3e7a40b9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/740bf57f0e974be3898e91b6e504836b] 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 19629d6e5f0745b982c56278bfc22ba8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734237642456 2024-12-15T04:40:45,780 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 897f9a58de21497b957ffc056ad886bb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734237642469 2024-12-15T04:40:45,781 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 766b77ceacc743e98e1bb147958d525a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237644592 2024-12-15T04:40:45,781 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 350e5bdb29e94244bec86a2c3e7a40b9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734237642469 2024-12-15T04:40:45,781 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 740bf57f0e974be3898e91b6e504836b, keycount=150, bloomtype=ROW, size=30.2 K, 
encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237644592 2024-12-15T04:40:45,786 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:45,787 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#B#compaction#445 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:45,788 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/3092f3fdd5ec4f5b927edfa13bfa8c5e is 50, key is test_row_0/B:col10/1734237645704/Put/seqid=0 2024-12-15T04:40:45,792 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024121579b1d21bca184666ab95f9248105d5fa_3aea4ed87c2ca56ea7b1d05fdf98762f store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:45,793 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024121579b1d21bca184666ab95f9248105d5fa_3aea4ed87c2ca56ea7b1d05fdf98762f, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:45,794 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121579b1d21bca184666ab95f9248105d5fa_3aea4ed87c2ca56ea7b1d05fdf98762f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:45,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742355_1531 (size=12104) 2024-12-15T04:40:45,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742356_1532 (size=4469) 2024-12-15T04:40:45,810 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#A#compaction#444 average throughput is 1.02 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:45,811 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/657b42659b5b4640a0a7aa1c3a82cf27 is 175, key is test_row_0/A:col10/1734237645704/Put/seqid=0 2024-12-15T04:40:45,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742357_1533 (size=31058) 2024-12-15T04:40:45,819 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/657b42659b5b4640a0a7aa1c3a82cf27 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/657b42659b5b4640a0a7aa1c3a82cf27 2024-12-15T04:40:45,824 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/A of 3aea4ed87c2ca56ea7b1d05fdf98762f into 657b42659b5b4640a0a7aa1c3a82cf27(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:45,824 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:45,824 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/A, priority=13, startTime=1734237645777; duration=0sec 2024-12-15T04:40:45,824 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:45,824 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:A 2024-12-15T04:40:45,824 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:45,825 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:45,825 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/C is initiating minor compaction (all files) 2024-12-15T04:40:45,825 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/C in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:45,825 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/0197c48f505b4cd69398a627bf3c90be, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3d75704c556b4498b6f300250cb3a771, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/563284641da84f60ab04c580ccee99a7] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=35.2 K 2024-12-15T04:40:45,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:45,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-12-15T04:40:45,826 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0197c48f505b4cd69398a627bf3c90be, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1734237642456 2024-12-15T04:40:45,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:45,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:45,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:45,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:45,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:45,826 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:45,826 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d75704c556b4498b6f300250cb3a771, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1734237642469 2024-12-15T04:40:45,827 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 563284641da84f60ab04c580ccee99a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237644592 2024-12-15T04:40:45,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215f5c8e7af223044559f7fd55f08b0e3a1_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237645825/Put/seqid=0 2024-12-15T04:40:45,836 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237705833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237705833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,837 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#C#compaction#447 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:45,837 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/ab9ba3eee34b40d7aad68e431a329585 is 50, key is test_row_0/C:col10/1734237645704/Put/seqid=0 2024-12-15T04:40:45,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742358_1534 (size=17034) 2024-12-15T04:40:45,839 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:45,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237705837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237705837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237705837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,842 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215f5c8e7af223044559f7fd55f08b0e3a1_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215f5c8e7af223044559f7fd55f08b0e3a1_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:45,842 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/dba1658152dd40da8353007de8bad591, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:45,843 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/dba1658152dd40da8353007de8bad591 is 175, key is test_row_0/A:col10/1734237645825/Put/seqid=0 2024-12-15T04:40:45,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742359_1535 (size=12104) 2024-12-15T04:40:45,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742360_1536 (size=48139) 2024-12-15T04:40:45,848 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/dba1658152dd40da8353007de8bad591 2024-12-15T04:40:45,850 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/ab9ba3eee34b40d7aad68e431a329585 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ab9ba3eee34b40d7aad68e431a329585 2024-12-15T04:40:45,855 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/db4212097e85495287a9baf7427587d9 is 50, key is test_row_0/B:col10/1734237645825/Put/seqid=0 2024-12-15T04:40:45,855 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/C of 3aea4ed87c2ca56ea7b1d05fdf98762f into ab9ba3eee34b40d7aad68e431a329585(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:45,855 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:45,855 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/C, priority=13, startTime=1734237645778; duration=0sec 2024-12-15T04:40:45,855 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:45,855 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:C 2024-12-15T04:40:45,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742361_1537 (size=12001) 2024-12-15T04:40:45,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237705937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,939 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237705937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,942 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237705940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237705940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:45,942 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:45,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237705940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237706140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237706140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237706143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,144 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237706143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237706143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,205 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/3092f3fdd5ec4f5b927edfa13bfa8c5e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/3092f3fdd5ec4f5b927edfa13bfa8c5e 2024-12-15T04:40:46,208 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/B of 3aea4ed87c2ca56ea7b1d05fdf98762f into 3092f3fdd5ec4f5b927edfa13bfa8c5e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:46,208 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:46,208 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/B, priority=13, startTime=1734237645778; duration=0sec 2024-12-15T04:40:46,208 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:46,208 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:B 2024-12-15T04:40:46,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/db4212097e85495287a9baf7427587d9 2024-12-15T04:40:46,266 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/7df46f4a24db445f8b6969de65f7a554 is 50, key is test_row_0/C:col10/1734237645825/Put/seqid=0 2024-12-15T04:40:46,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742362_1538 (size=12001) 2024-12-15T04:40:46,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237706443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237706444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,446 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237706445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,446 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237706446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,448 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237706447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=153 2024-12-15T04:40:46,558 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 153 completed 2024-12-15T04:40:46,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees 2024-12-15T04:40:46,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T04:40:46,560 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:46,560 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=155, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:46,561 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=155 2024-12-15T04:40:46,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/7df46f4a24db445f8b6969de65f7a554 2024-12-15T04:40:46,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/dba1658152dd40da8353007de8bad591 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/dba1658152dd40da8353007de8bad591 2024-12-15T04:40:46,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/dba1658152dd40da8353007de8bad591, entries=250, sequenceid=79, filesize=47.0 K 2024-12-15T04:40:46,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/db4212097e85495287a9baf7427587d9 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/db4212097e85495287a9baf7427587d9 2024-12-15T04:40:46,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/db4212097e85495287a9baf7427587d9, entries=150, sequenceid=79, filesize=11.7 K 2024-12-15T04:40:46,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/7df46f4a24db445f8b6969de65f7a554 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/7df46f4a24db445f8b6969de65f7a554 2024-12-15T04:40:46,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/7df46f4a24db445f8b6969de65f7a554, entries=150, sequenceid=79, filesize=11.7 K 2024-12-15T04:40:46,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 855ms, sequenceid=79, compaction requested=false 2024-12-15T04:40:46,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:46,712 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=156 2024-12-15T04:40:46,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:46,712 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-15T04:40:46,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:46,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:46,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e446431fac8a4c61a0c95d4c5c408a7b_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237645835/Put/seqid=0 2024-12-15T04:40:46,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742363_1539 (size=12154) 2024-12-15T04:40:46,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T04:40:46,947 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:46,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:46,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237706959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237706959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237706960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237706960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:46,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:46,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237706962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237707063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,065 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237707063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237707063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237707063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237707064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:47,124 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215e446431fac8a4c61a0c95d4c5c408a7b_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e446431fac8a4c61a0c95d4c5c408a7b_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:47,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/1e663ad022ba48b09570d58775208f46, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:47,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/1e663ad022ba48b09570d58775208f46 is 175, key is test_row_0/A:col10/1734237645835/Put/seqid=0 2024-12-15T04:40:47,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742364_1540 (size=30955) 2024-12-15T04:40:47,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T04:40:47,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237707266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237707266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237707266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237707266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237707268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,530 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/1e663ad022ba48b09570d58775208f46 2024-12-15T04:40:47,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/fdb41992d2cc496bb764d202b94b399d is 50, key is test_row_0/B:col10/1734237645835/Put/seqid=0 2024-12-15T04:40:47,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742365_1541 (size=12001) 2024-12-15T04:40:47,569 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237707568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237707569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237707569, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237707570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:47,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237707571, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:47,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T04:40:47,943 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/fdb41992d2cc496bb764d202b94b399d 2024-12-15T04:40:47,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/fe285c5affa54fe3b5f2c550eee1fbed is 50, key is test_row_0/C:col10/1734237645835/Put/seqid=0 2024-12-15T04:40:47,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742366_1542 (size=12001) 2024-12-15T04:40:48,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:48,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237708071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:48,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:48,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237708073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:48,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:48,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237708074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:48,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:48,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237708074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:48,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:48,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237708077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:48,351 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/fe285c5affa54fe3b5f2c550eee1fbed 2024-12-15T04:40:48,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/1e663ad022ba48b09570d58775208f46 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/1e663ad022ba48b09570d58775208f46 2024-12-15T04:40:48,357 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/1e663ad022ba48b09570d58775208f46, entries=150, sequenceid=95, filesize=30.2 K 2024-12-15T04:40:48,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/fdb41992d2cc496bb764d202b94b399d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/fdb41992d2cc496bb764d202b94b399d 2024-12-15T04:40:48,360 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/fdb41992d2cc496bb764d202b94b399d, entries=150, sequenceid=95, filesize=11.7 K 2024-12-15T04:40:48,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/fe285c5affa54fe3b5f2c550eee1fbed as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/fe285c5affa54fe3b5f2c550eee1fbed 2024-12-15T04:40:48,363 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/fe285c5affa54fe3b5f2c550eee1fbed, entries=150, sequenceid=95, filesize=11.7 K 2024-12-15T04:40:48,364 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1652ms, sequenceid=95, compaction requested=true 2024-12-15T04:40:48,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:48,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:48,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-12-15T04:40:48,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=156 2024-12-15T04:40:48,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-15T04:40:48,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8030 sec 2024-12-15T04:40:48,366 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=155, table=TestAcidGuarantees in 1.8060 sec 2024-12-15T04:40:48,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-15T04:40:48,663 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-12-15T04:40:48,664 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:48,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-12-15T04:40:48,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-15T04:40:48,665 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:48,665 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:48,666 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:48,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-15T04:40:48,817 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:48,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-12-15T04:40:48,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:48,817 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-12-15T04:40:48,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:48,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:48,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215827908f553d44627a4917aaa8505f971_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237646961/Put/seqid=0 2024-12-15T04:40:48,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35109 is added to blk_1073742367_1543 (size=12154) 2024-12-15T04:40:48,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-15T04:40:49,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:49,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237709083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237709083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237709083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237709084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237709085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237709186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,187 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237709186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237709186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237709186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237709187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:49,228 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215827908f553d44627a4917aaa8505f971_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215827908f553d44627a4917aaa8505f971_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:49,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/d8ab357744314efdbc260bb6faa4fae4, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:49,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/d8ab357744314efdbc260bb6faa4fae4 is 175, key is test_row_0/A:col10/1734237646961/Put/seqid=0 2024-12-15T04:40:49,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742368_1544 (size=30955) 2024-12-15T04:40:49,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-15T04:40:49,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237709388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237709388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237709388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237709389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237709390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,632 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/d8ab357744314efdbc260bb6faa4fae4 2024-12-15T04:40:49,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/26b978a0fc284b34ad1d3d1a946018a7 is 50, key is test_row_0/B:col10/1734237646961/Put/seqid=0 2024-12-15T04:40:49,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742369_1545 (size=12001) 2024-12-15T04:40:49,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237709690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237709691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237709691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237709691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:49,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237709693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:49,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-15T04:40:50,041 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/26b978a0fc284b34ad1d3d1a946018a7 2024-12-15T04:40:50,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/1409b48705ed405280fe1b066c3b5d62 is 50, key is test_row_0/C:col10/1734237646961/Put/seqid=0 2024-12-15T04:40:50,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742370_1546 (size=12001) 2024-12-15T04:40:50,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:50,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237710193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:50,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:50,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237710193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:50,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:50,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237710197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:50,198 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:50,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237710197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:50,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:50,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237710199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:50,449 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/1409b48705ed405280fe1b066c3b5d62 2024-12-15T04:40:50,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/d8ab357744314efdbc260bb6faa4fae4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/d8ab357744314efdbc260bb6faa4fae4 2024-12-15T04:40:50,454 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/d8ab357744314efdbc260bb6faa4fae4, entries=150, sequenceid=117, filesize=30.2 K 2024-12-15T04:40:50,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/26b978a0fc284b34ad1d3d1a946018a7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/26b978a0fc284b34ad1d3d1a946018a7 2024-12-15T04:40:50,457 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/26b978a0fc284b34ad1d3d1a946018a7, entries=150, sequenceid=117, filesize=11.7 K 2024-12-15T04:40:50,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/1409b48705ed405280fe1b066c3b5d62 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/1409b48705ed405280fe1b066c3b5d62 2024-12-15T04:40:50,460 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/1409b48705ed405280fe1b066c3b5d62, entries=150, sequenceid=117, filesize=11.7 K 2024-12-15T04:40:50,461 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1643ms, sequenceid=117, compaction requested=true 2024-12-15T04:40:50,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:50,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:50,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158 2024-12-15T04:40:50,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=158 2024-12-15T04:40:50,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157 2024-12-15T04:40:50,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7960 sec 2024-12-15T04:40:50,463 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 1.7980 sec 2024-12-15T04:40:50,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-12-15T04:40:50,768 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed 2024-12-15T04:40:50,769 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:50,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-12-15T04:40:50,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-15T04:40:50,770 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:50,770 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:50,771 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:50,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-15T04:40:50,921 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:50,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-12-15T04:40:50,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:50,922 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-12-15T04:40:50,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:50,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:50,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:50,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:50,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:50,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:50,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121507c58f3099444eb886576994951086b1_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237649078/Put/seqid=0 2024-12-15T04:40:50,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:35109 is added to blk_1073742371_1547 (size=12204) 2024-12-15T04:40:51,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-15T04:40:51,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:51,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:51,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,215 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237711212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237711212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,217 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237711215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237711215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,218 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237711216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237711316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,317 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237711316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237711318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237711318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,320 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237711318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:51,333 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121507c58f3099444eb886576994951086b1_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121507c58f3099444eb886576994951086b1_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:51,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/a366d367149846d8b634c3891a97a186, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:51,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/a366d367149846d8b634c3891a97a186 is 175, key is test_row_0/A:col10/1734237649078/Put/seqid=0 2024-12-15T04:40:51,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742372_1548 (size=31005) 2024-12-15T04:40:51,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-15T04:40:51,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237711518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237711519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237711522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237711522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237711522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,741 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=131, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/a366d367149846d8b634c3891a97a186 2024-12-15T04:40:51,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/84a3ee74bb8e4d5aab805c86f33f7984 is 50, key is test_row_0/B:col10/1734237649078/Put/seqid=0 2024-12-15T04:40:51,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742373_1549 (size=12051) 2024-12-15T04:40:51,749 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/84a3ee74bb8e4d5aab805c86f33f7984 2024-12-15T04:40:51,754 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/9f018f1043b6469fb6ec01e91c9e587a is 50, key is test_row_0/C:col10/1734237649078/Put/seqid=0 2024-12-15T04:40:51,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742374_1550 (size=12051) 2024-12-15T04:40:51,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237711820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,824 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237711822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237711824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237711825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:51,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237711826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:51,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-15T04:40:52,157 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/9f018f1043b6469fb6ec01e91c9e587a 2024-12-15T04:40:52,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/a366d367149846d8b634c3891a97a186 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/a366d367149846d8b634c3891a97a186 2024-12-15T04:40:52,162 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/a366d367149846d8b634c3891a97a186, entries=150, sequenceid=131, filesize=30.3 K 2024-12-15T04:40:52,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/84a3ee74bb8e4d5aab805c86f33f7984 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/84a3ee74bb8e4d5aab805c86f33f7984 2024-12-15T04:40:52,165 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/84a3ee74bb8e4d5aab805c86f33f7984, entries=150, sequenceid=131, filesize=11.8 K 2024-12-15T04:40:52,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/9f018f1043b6469fb6ec01e91c9e587a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/9f018f1043b6469fb6ec01e91c9e587a 2024-12-15T04:40:52,168 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/9f018f1043b6469fb6ec01e91c9e587a, entries=150, sequenceid=131, filesize=11.8 K 2024-12-15T04:40:52,168 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1246ms, sequenceid=131, compaction requested=true 2024-12-15T04:40:52,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:52,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:52,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-12-15T04:40:52,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-12-15T04:40:52,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-12-15T04:40:52,170 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3990 sec 2024-12-15T04:40:52,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 1.4010 sec 2024-12-15T04:40:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:52,328 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-12-15T04:40:52,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:52,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:52,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:52,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:52,328 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:52,328 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:52,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215d487b39172e843edb10a1ffe1e445524_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237652327/Put/seqid=0 2024-12-15T04:40:52,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742375_1551 (size=14794) 2024-12-15T04:40:52,338 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237712336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237712336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237712337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237712337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,341 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237712339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237712439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237712440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,442 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237712440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237712441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,444 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237712442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237712643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237712643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237712643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237712644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,646 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237712644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,736 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:52,738 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215d487b39172e843edb10a1ffe1e445524_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215d487b39172e843edb10a1ffe1e445524_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:52,739 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/8983821de9d04f48a90463921c021795, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:52,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/8983821de9d04f48a90463921c021795 is 175, key is test_row_0/A:col10/1734237652327/Put/seqid=0 2024-12-15T04:40:52,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742376_1552 (size=39749) 2024-12-15T04:40:52,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-12-15T04:40:52,873 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-12-15T04:40:52,874 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:52,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-12-15T04:40:52,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T04:40:52,875 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:52,875 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:52,876 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:52,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237712945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237712945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237712945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237712947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:52,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237712948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:52,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T04:40:53,026 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,027 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,027 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:53,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,142 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/8983821de9d04f48a90463921c021795 2024-12-15T04:40:53,148 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/9d97abb7a1e24e48aacd1ab3700742c1 is 50, key is test_row_0/B:col10/1734237652327/Put/seqid=0 2024-12-15T04:40:53,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742377_1553 (size=12151) 2024-12-15T04:40:53,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T04:40:53,179 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,179 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:53,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:53,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,179 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,333 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,334 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:53,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:53,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,334 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,334 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:53,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237713448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,451 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:53,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237713450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237713450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,452 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237713450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:53,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237713452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T04:40:53,485 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,486 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:53,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:53,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:53,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,564 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/9d97abb7a1e24e48aacd1ab3700742c1 2024-12-15T04:40:53,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/4b9ec3d055964fe5b10925bef804b425 is 50, key is test_row_0/C:col10/1734237652327/Put/seqid=0 2024-12-15T04:40:53,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742378_1554 (size=12151) 2024-12-15T04:40:53,638 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,638 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:53,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:53,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,638 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
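Annotation: the repeating `NOT flushing ... as already flushing` / `Unable to complete flush` pairs show the remote flush procedure (pid=162) being re-dispatched while an earlier flush of the same region is still running; the region server declines, reports the failure, and the master re-sends the procedure a moment later. The following is a minimal, self-contained sketch of that guard pattern with hypothetical names; it is not the actual HRegion/FlushRegionCallable code.

```java
import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Illustrative only: mimics the "decline if a flush is already in progress" behaviour seen in the log.
public class RegionFlushGuard {
  private final AtomicBoolean flushing = new AtomicBoolean(false);

  /** Called by the remote flush procedure; throws so the master knows to retry later. */
  void flushOnce(Runnable doFlush) throws IOException {
    if (!flushing.compareAndSet(false, true)) {
      // Matches the log's "NOT flushing ... as already flushing" followed by
      // "Unable to complete flush": the callable reports failure and the master re-dispatches.
      throw new IOException("Unable to complete flush: already flushing");
    }
    try {
      doFlush.run();        // write the memstore snapshot out as store files
    } finally {
      flushing.set(false);  // allow the next flush request through
    }
  }

  public static void main(String[] args) throws Exception {
    RegionFlushGuard guard = new RegionFlushGuard();
    guard.flushOnce(() -> System.out.println("flushing memstore snapshot"));
  }
}
```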
2024-12-15T04:40:53,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,789 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,941 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:53,941 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:53,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:53,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:53,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,942 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:53,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:53,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
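Annotation: the flush retries above keep happening because writers are hitting the region's memstore blocking limit (`Over memstore limit=512.0 K`, seen both earlier and later in this section). That blocking threshold is normally the configured flush size multiplied by the block multiplier, and this test appears to run with a deliberately small flush size so writers reach it quickly. The sketch below shows how such a limit could be configured for a mini-cluster test; the particular values (128 KB x 4 = 512 KB) are an assumption for illustration, not read from the test's source.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreLimitConfig {
  public static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Flush each memstore once it reaches ~128 KB (assumed value for illustration).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new writes once the memstore grows past flush.size * multiplier = 512 KB,
    // which would match the "Over memstore limit=512.0 K" threshold reported in the log.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}
```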
2024-12-15T04:40:53,973 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/4b9ec3d055964fe5b10925bef804b425 2024-12-15T04:40:53,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/8983821de9d04f48a90463921c021795 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/8983821de9d04f48a90463921c021795 2024-12-15T04:40:53,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T04:40:53,979 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/8983821de9d04f48a90463921c021795, entries=200, sequenceid=155, filesize=38.8 K 2024-12-15T04:40:53,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/9d97abb7a1e24e48aacd1ab3700742c1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9d97abb7a1e24e48aacd1ab3700742c1 2024-12-15T04:40:53,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9d97abb7a1e24e48aacd1ab3700742c1, entries=150, sequenceid=155, filesize=11.9 K 2024-12-15T04:40:53,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/4b9ec3d055964fe5b10925bef804b425 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/4b9ec3d055964fe5b10925bef804b425 2024-12-15T04:40:53,986 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/4b9ec3d055964fe5b10925bef804b425, entries=150, sequenceid=155, filesize=11.9 K 2024-12-15T04:40:53,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1658ms, sequenceid=155, compaction requested=true 2024-12-15T04:40:53,986 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:53,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:A, priority=-2147483648, current under compaction store 
size is 1 2024-12-15T04:40:53,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:53,986 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-15T04:40:53,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:53,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:53,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:53,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:53,987 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 211861 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72309 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/B is initiating minor compaction (all files) 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/A is initiating minor compaction (all files) 2024-12-15T04:40:53,988 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/B in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,988 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/A in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
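Annotation: the `Exploring compaction algorithm has selected 6 files of size ... after considering 10 permutations with 10 in ratio` records above are the minor-compaction file selection: contiguous windows of store files are scored, and a window only qualifies when each file in it is no larger than the ratio times the combined size of the other files in that window. The sketch below is a simplified version of that ratio test; the real ExploringCompactionPolicy applies further constraints (min/max file counts, off-peak ratios), so treat this as an approximation. The per-file sizes in the demo are chosen to sum to the 72309 bytes reported for the B store; the individual split is illustrative.

```java
import java.util.List;

public class RatioCompactionSelect {
  /** Return {start, end} (inclusive) of the best qualifying window, or null if none qualifies. */
  static int[] select(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
    int[] best = null;
    long bestTotal = Long.MAX_VALUE;
    for (int start = 0; start < sizes.size(); start++) {
      for (int end = start + minFiles - 1; end < sizes.size() && end - start + 1 <= maxFiles; end++) {
        long total = 0;
        for (int i = start; i <= end; i++) total += sizes.get(i);
        boolean inRatio = true;
        for (int i = start; i <= end; i++) {
          // A file qualifies only if it is not much bigger than everything else in the window.
          if (sizes.get(i) > ratio * (total - sizes.get(i))) { inRatio = false; break; }
        }
        if (!inRatio) continue;
        int files = end - start + 1;
        // Prefer windows with more files; break ties by smaller total rewrite cost.
        if (best == null || files > best[1] - best[0] + 1
            || (files == best[1] - best[0] + 1 && total < bestTotal)) {
          best = new int[]{start, end};
          bestTotal = total;
        }
      }
    }
    return best;
  }

  public static void main(String[] args) {
    List<Long> sizes = List.of(12085L, 11983L, 11983L, 11983L, 12085L, 12190L); // ~72309 bytes total
    int[] window = select(sizes, 3, 10, 1.2);
    System.out.println("selected window: [" + window[0] + ", " + window[1] + "]"); // [0, 5]: all six files
  }
}
```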
2024-12-15T04:40:53,988 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/3092f3fdd5ec4f5b927edfa13bfa8c5e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/db4212097e85495287a9baf7427587d9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/fdb41992d2cc496bb764d202b94b399d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/26b978a0fc284b34ad1d3d1a946018a7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/84a3ee74bb8e4d5aab805c86f33f7984, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9d97abb7a1e24e48aacd1ab3700742c1] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=70.6 K 2024-12-15T04:40:53,988 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/657b42659b5b4640a0a7aa1c3a82cf27, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/dba1658152dd40da8353007de8bad591, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/1e663ad022ba48b09570d58775208f46, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/d8ab357744314efdbc260bb6faa4fae4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/a366d367149846d8b634c3891a97a186, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/8983821de9d04f48a90463921c021795] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=206.9 K 2024-12-15T04:40:53,988 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=10 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/657b42659b5b4640a0a7aa1c3a82cf27, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/dba1658152dd40da8353007de8bad591, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/1e663ad022ba48b09570d58775208f46, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/d8ab357744314efdbc260bb6faa4fae4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/a366d367149846d8b634c3891a97a186, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/8983821de9d04f48a90463921c021795] 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 3092f3fdd5ec4f5b927edfa13bfa8c5e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237644592 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 657b42659b5b4640a0a7aa1c3a82cf27, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237644592 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting db4212097e85495287a9baf7427587d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734237645717 2024-12-15T04:40:53,988 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting dba1658152dd40da8353007de8bad591, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734237645717 2024-12-15T04:40:53,989 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fdb41992d2cc496bb764d202b94b399d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734237645835 2024-12-15T04:40:53,989 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e663ad022ba48b09570d58775208f46, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734237645835 2024-12-15T04:40:53,989 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8ab357744314efdbc260bb6faa4fae4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734237646959 2024-12-15T04:40:53,989 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 26b978a0fc284b34ad1d3d1a946018a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734237646959 2024-12-15T04:40:53,989 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting a366d367149846d8b634c3891a97a186, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237649078 2024-12-15T04:40:53,989 DEBUG 
[RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 84a3ee74bb8e4d5aab805c86f33f7984, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237649078 2024-12-15T04:40:53,989 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8983821de9d04f48a90463921c021795, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734237651211 2024-12-15T04:40:53,989 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d97abb7a1e24e48aacd1ab3700742c1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734237651214 2024-12-15T04:40:53,998 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#B#compaction#462 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:53,999 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/c30114945dac41aa8167041d64677829 is 50, key is test_row_0/B:col10/1734237652327/Put/seqid=0 2024-12-15T04:40:54,000 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:54,012 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412151aee5df911084e7494d70f188f74dae1_3aea4ed87c2ca56ea7b1d05fdf98762f store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:54,014 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412151aee5df911084e7494d70f188f74dae1_3aea4ed87c2ca56ea7b1d05fdf98762f, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:54,014 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412151aee5df911084e7494d70f188f74dae1_3aea4ed87c2ca56ea7b1d05fdf98762f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:54,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742379_1555 (size=12459) 2024-12-15T04:40:54,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742380_1556 (size=4469) 2024-12-15T04:40:54,093 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote 
procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-12-15T04:40:54,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:54,094 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:40:54,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:54,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:54,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:54,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:54,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:54,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:54,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215641c77d8c1034e65b3d6a357f4d1efea_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237652330/Put/seqid=0 2024-12-15T04:40:54,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742381_1557 (size=12304) 2024-12-15T04:40:54,417 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/c30114945dac41aa8167041d64677829 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c30114945dac41aa8167041d64677829 2024-12-15T04:40:54,418 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#A#compaction#463 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:54,418 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/e6816a312873443db70ef89333d0214e is 175, key is test_row_0/A:col10/1734237652327/Put/seqid=0 2024-12-15T04:40:54,420 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/B of 3aea4ed87c2ca56ea7b1d05fdf98762f into c30114945dac41aa8167041d64677829(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:54,421 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:54,421 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/B, priority=10, startTime=1734237653986; duration=0sec 2024-12-15T04:40:54,421 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:54,421 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:B 2024-12-15T04:40:54,421 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-12-15T04:40:54,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742382_1558 (size=31413) 2024-12-15T04:40:54,422 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72309 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-12-15T04:40:54,422 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/C is initiating minor compaction (all files) 2024-12-15T04:40:54,422 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/C in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
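Annotation: both the flush and the compactions in this section write their output under the region's `.tmp` directory and then commit it by renaming it into the store directory (`Committing .../.tmp/B/... as .../B/...` above), so readers only ever observe complete files. Below is a small, generic sketch of that write-to-temp-then-rename pattern using `java.nio.file`; it illustrates the idea only and is not HBase's HRegionFileSystem code (HBase uses a separate region-level `.tmp` directory on HDFS, whereas this sketch just uses a sibling temp file).

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommit {
  /** Write bytes to a sibling ".tmp" file, then atomically move it to its final name. */
  static void commitFile(Path finalPath, byte[] contents) throws IOException {
    Path tmp = finalPath.resolveSibling(finalPath.getFileName() + ".tmp");
    Files.createDirectories(finalPath.getParent());
    Files.write(tmp, contents);                         // incomplete data only ever lives under the tmp name
    Files.move(tmp, finalPath, StandardCopyOption.ATOMIC_MOVE); // readers see nothing or the whole file
  }

  public static void main(String[] args) throws IOException {
    commitFile(Path.of("data/A/example-storefile"), new byte[]{1, 2, 3});
  }
}
```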
2024-12-15T04:40:54,423 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ab9ba3eee34b40d7aad68e431a329585, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/7df46f4a24db445f8b6969de65f7a554, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/fe285c5affa54fe3b5f2c550eee1fbed, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/1409b48705ed405280fe1b066c3b5d62, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/9f018f1043b6469fb6ec01e91c9e587a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/4b9ec3d055964fe5b10925bef804b425] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=70.6 K 2024-12-15T04:40:54,423 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting ab9ba3eee34b40d7aad68e431a329585, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1734237644592 2024-12-15T04:40:54,423 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 7df46f4a24db445f8b6969de65f7a554, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1734237645717 2024-12-15T04:40:54,423 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting fe285c5affa54fe3b5f2c550eee1fbed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1734237645835 2024-12-15T04:40:54,424 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 1409b48705ed405280fe1b066c3b5d62, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1734237646959 2024-12-15T04:40:54,424 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f018f1043b6469fb6ec01e91c9e587a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1734237649078 2024-12-15T04:40:54,424 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b9ec3d055964fe5b10925bef804b425, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734237651214 2024-12-15T04:40:54,424 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/e6816a312873443db70ef89333d0214e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e6816a312873443db70ef89333d0214e 2024-12-15T04:40:54,427 INFO 
[RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/A of 3aea4ed87c2ca56ea7b1d05fdf98762f into e6816a312873443db70ef89333d0214e(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:54,427 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:54,427 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/A, priority=10, startTime=1734237653986; duration=0sec 2024-12-15T04:40:54,428 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:54,428 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:A 2024-12-15T04:40:54,433 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#C#compaction#465 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:54,433 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/6fbf055e384b47d9b3047810a0e6c50f is 50, key is test_row_0/C:col10/1734237652327/Put/seqid=0 2024-12-15T04:40:54,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742383_1559 (size=12459) 2024-12-15T04:40:54,440 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/6fbf055e384b47d9b3047810a0e6c50f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6fbf055e384b47d9b3047810a0e6c50f 2024-12-15T04:40:54,445 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/C of 3aea4ed87c2ca56ea7b1d05fdf98762f into 6fbf055e384b47d9b3047810a0e6c50f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
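Annotation: the `PressureAwareThroughputController ... average throughput is X MB/second, slept 0 time(s) ... total limit is 50.00 MB/second` records come from the compaction throughput limiter: the compactor reports bytes written as it goes, and the controller sleeps the thread whenever the write rate runs ahead of the configured limit. These compactions were tiny, so no sleeping was needed. The sketch below captures the rate-limiting idea only; it is not the actual controller.

```java
public class SimpleThroughputLimiter {
  private final double maxBytesPerSecond;
  private long startNanos = -1;
  private long bytesWritten;

  SimpleThroughputLimiter(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  /** Call after writing a chunk; sleeps if we are ahead of the allowed rate. */
  synchronized void control(long deltaBytes) throws InterruptedException {
    if (startNanos < 0) startNanos = System.nanoTime();
    bytesWritten += deltaBytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minSecondsForBytes = bytesWritten / maxBytesPerSecond;
    long sleepMs = (long) ((minSecondsForBytes - elapsedSec) * 1000);
    if (sleepMs > 0) {
      Thread.sleep(sleepMs); // corresponds to the log's "slept N time(s)" counter
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // 50 MB/s limit, as in the log; writing ~12 KB stays far below it, so no sleep happens.
    SimpleThroughputLimiter limiter = new SimpleThroughputLimiter(50 * 1024 * 1024);
    limiter.control(12 * 1024);
  }
}
```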
2024-12-15T04:40:54,445 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:54,445 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/C, priority=10, startTime=1734237653986; duration=0sec 2024-12-15T04:40:54,445 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:54,446 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:C 2024-12-15T04:40:54,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:54,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:54,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237714491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237714491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237714492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237714494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,502 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237714500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,505 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:54,507 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215641c77d8c1034e65b3d6a357f4d1efea_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215641c77d8c1034e65b3d6a357f4d1efea_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:54,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/ae78338ab8d0449f8c4d08fefb1260f1, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:54,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/ae78338ab8d0449f8c4d08fefb1260f1 is 175, key is test_row_0/A:col10/1734237652330/Put/seqid=0 2024-12-15T04:40:54,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742384_1560 (size=31105) 2024-12-15T04:40:54,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237714596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237714596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237714596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237714598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237714603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,799 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237714798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237714798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,799 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237714799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237714799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,807 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:54,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237714806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:54,912 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=167, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/ae78338ab8d0449f8c4d08fefb1260f1 2024-12-15T04:40:54,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/73f96992fdc04b44b627b72a0665998f is 50, key is test_row_0/B:col10/1734237652330/Put/seqid=0 2024-12-15T04:40:54,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742385_1561 (size=12151) 2024-12-15T04:40:54,931 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/73f96992fdc04b44b627b72a0665998f 2024-12-15T04:40:54,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/206ec7a4ff524dcf9f09fbccadb745ab is 50, key is test_row_0/C:col10/1734237652330/Put/seqid=0 2024-12-15T04:40:54,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742386_1562 (size=12151) 2024-12-15T04:40:54,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T04:40:55,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237715099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237715100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,102 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237715101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237715103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237715110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,341 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/206ec7a4ff524dcf9f09fbccadb745ab 2024-12-15T04:40:55,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/ae78338ab8d0449f8c4d08fefb1260f1 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/ae78338ab8d0449f8c4d08fefb1260f1 2024-12-15T04:40:55,346 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/ae78338ab8d0449f8c4d08fefb1260f1, entries=150, sequenceid=167, filesize=30.4 K 2024-12-15T04:40:55,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/73f96992fdc04b44b627b72a0665998f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/73f96992fdc04b44b627b72a0665998f 2024-12-15T04:40:55,349 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/73f96992fdc04b44b627b72a0665998f, entries=150, sequenceid=167, filesize=11.9 K 2024-12-15T04:40:55,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/206ec7a4ff524dcf9f09fbccadb745ab as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/206ec7a4ff524dcf9f09fbccadb745ab 2024-12-15T04:40:55,352 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/206ec7a4ff524dcf9f09fbccadb745ab, entries=150, sequenceid=167, filesize=11.9 K 2024-12-15T04:40:55,353 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1259ms, sequenceid=167, compaction requested=false 2024-12-15T04:40:55,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:55,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:55,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-12-15T04:40:55,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-12-15T04:40:55,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-12-15T04:40:55,355 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4790 sec 2024-12-15T04:40:55,356 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.4810 sec 2024-12-15T04:40:55,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:55,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-12-15T04:40:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:55,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:55,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237715609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215c81e7d971105477ea259dbd3dad38819_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237655604/Put/seqid=0 2024-12-15T04:40:55,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237715610, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,612 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237715611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237715611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742387_1563 (size=14794) 2024-12-15T04:40:55,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237715614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237715712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237715713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237715713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237715713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237715914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237715915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237715916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:55,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:55,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237715916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,014 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:56,017 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215c81e7d971105477ea259dbd3dad38819_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215c81e7d971105477ea259dbd3dad38819_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:56,017 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/25311ab5b8da436c81f4adf816bb298a, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:56,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/25311ab5b8da436c81f4adf816bb298a is 175, key is test_row_0/A:col10/1734237655604/Put/seqid=0 2024-12-15T04:40:56,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742388_1564 (size=39749) 2024-12-15T04:40:56,217 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237716217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,220 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237716219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237716219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237716219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,421 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/25311ab5b8da436c81f4adf816bb298a 2024-12-15T04:40:56,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/d99448d04c0d44d1815de4ad4ed7c65d is 50, key is test_row_0/B:col10/1734237655604/Put/seqid=0 2024-12-15T04:40:56,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742389_1565 (size=12151) 2024-12-15T04:40:56,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237716624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237716720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237716721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237716722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:56,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237716723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:56,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/d99448d04c0d44d1815de4ad4ed7c65d 2024-12-15T04:40:56,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/545fb0444087456983ba8c609ad4e5c0 is 50, key is test_row_0/C:col10/1734237655604/Put/seqid=0 2024-12-15T04:40:56,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742390_1566 (size=12151) 2024-12-15T04:40:56,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-15T04:40:56,979 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-12-15T04:40:56,980 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:56,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-12-15T04:40:56,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 
2024-12-15T04:40:56,981 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:56,981 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:56,981 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:57,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-15T04:40:57,132 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,132 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-15T04:40:57,132 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:57,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:57,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:57,133 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:57,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:57,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:57,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/545fb0444087456983ba8c609ad4e5c0 2024-12-15T04:40:57,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/25311ab5b8da436c81f4adf816bb298a as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/25311ab5b8da436c81f4adf816bb298a 2024-12-15T04:40:57,267 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/25311ab5b8da436c81f4adf816bb298a, entries=200, sequenceid=197, filesize=38.8 K 2024-12-15T04:40:57,267 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/d99448d04c0d44d1815de4ad4ed7c65d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/d99448d04c0d44d1815de4ad4ed7c65d 2024-12-15T04:40:57,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/d99448d04c0d44d1815de4ad4ed7c65d, entries=150, sequenceid=197, filesize=11.9 K 2024-12-15T04:40:57,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/545fb0444087456983ba8c609ad4e5c0 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/545fb0444087456983ba8c609ad4e5c0 2024-12-15T04:40:57,273 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/545fb0444087456983ba8c609ad4e5c0, entries=150, sequenceid=197, filesize=11.9 K 2024-12-15T04:40:57,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1668ms, sequenceid=197, compaction requested=true 2024-12-15T04:40:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
3aea4ed87c2ca56ea7b1d05fdf98762f:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:57,274 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:57,274 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:57,274 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:57,276 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:57,276 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102267 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:57,276 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/A is initiating minor compaction (all files) 2024-12-15T04:40:57,276 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/B is initiating minor compaction (all files) 2024-12-15T04:40:57,276 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/B in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:57,276 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/A in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:57,276 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e6816a312873443db70ef89333d0214e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/ae78338ab8d0449f8c4d08fefb1260f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/25311ab5b8da436c81f4adf816bb298a] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=99.9 K 2024-12-15T04:40:57,276 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c30114945dac41aa8167041d64677829, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/73f96992fdc04b44b627b72a0665998f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/d99448d04c0d44d1815de4ad4ed7c65d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=35.9 K 2024-12-15T04:40:57,276 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:57,276 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e6816a312873443db70ef89333d0214e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/ae78338ab8d0449f8c4d08fefb1260f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/25311ab5b8da436c81f4adf816bb298a] 2024-12-15T04:40:57,277 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c30114945dac41aa8167041d64677829, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734237651214 2024-12-15T04:40:57,277 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting e6816a312873443db70ef89333d0214e, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734237651214 2024-12-15T04:40:57,278 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 73f96992fdc04b44b627b72a0665998f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1734237652330 2024-12-15T04:40:57,278 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae78338ab8d0449f8c4d08fefb1260f1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1734237652330 2024-12-15T04:40:57,278 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting d99448d04c0d44d1815de4ad4ed7c65d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237654490 2024-12-15T04:40:57,278 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 25311ab5b8da436c81f4adf816bb298a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237654490 2024-12-15T04:40:57,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-15T04:40:57,283 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#B#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:57,283 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:57,283 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/c17f1db4df364815b805505df945a433 is 50, key is test_row_0/B:col10/1734237655604/Put/seqid=0 2024-12-15T04:40:57,284 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-12-15T04:40:57,285 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412155badf504b6b14c0ea06d797ea5046473_3aea4ed87c2ca56ea7b1d05fdf98762f store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:57,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:57,285 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-12-15T04:40:57,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:57,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:57,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:57,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:57,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:57,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:57,286 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412155badf504b6b14c0ea06d797ea5046473_3aea4ed87c2ca56ea7b1d05fdf98762f, store=[table=TestAcidGuarantees family=A 
region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:57,286 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412155badf504b6b14c0ea06d797ea5046473_3aea4ed87c2ca56ea7b1d05fdf98762f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:57,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742391_1567 (size=12561) 2024-12-15T04:40:57,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742392_1568 (size=4469) 2024-12-15T04:40:57,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412153ae7bb8208b94591b83f3ae37b724482_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237655610/Put/seqid=0 2024-12-15T04:40:57,291 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#A#compaction#472 average throughput is 3.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:57,291 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/e3304fde291c4a089907b9097c810483 is 175, key is test_row_0/A:col10/1734237655604/Put/seqid=0 2024-12-15T04:40:57,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742393_1569 (size=12304) 2024-12-15T04:40:57,293 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/c17f1db4df364815b805505df945a433 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c17f1db4df364815b805505df945a433 2024-12-15T04:40:57,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742394_1570 (size=31515) 2024-12-15T04:40:57,298 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:57,302 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412153ae7bb8208b94591b83f3ae37b724482_3aea4ed87c2ca56ea7b1d05fdf98762f to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412153ae7bb8208b94591b83f3ae37b724482_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:57,302 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/B of 3aea4ed87c2ca56ea7b1d05fdf98762f into c17f1db4df364815b805505df945a433(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:57,302 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:57,302 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/B, priority=13, startTime=1734237657274; duration=0sec 2024-12-15T04:40:57,302 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:57,302 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:B 2024-12-15T04:40:57,302 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:57,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/74d555b74ddc4c01b773a755d48c41d4, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:57,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/74d555b74ddc4c01b773a755d48c41d4 is 175, key is test_row_0/A:col10/1734237655610/Put/seqid=0 2024-12-15T04:40:57,304 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:57,305 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/C is initiating minor compaction (all files) 2024-12-15T04:40:57,305 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/C in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:57,305 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6fbf055e384b47d9b3047810a0e6c50f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/206ec7a4ff524dcf9f09fbccadb745ab, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/545fb0444087456983ba8c609ad4e5c0] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=35.9 K 2024-12-15T04:40:57,305 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fbf055e384b47d9b3047810a0e6c50f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1734237651214 2024-12-15T04:40:57,306 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 206ec7a4ff524dcf9f09fbccadb745ab, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1734237652330 2024-12-15T04:40:57,306 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 545fb0444087456983ba8c609ad4e5c0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237654490 2024-12-15T04:40:57,311 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#C#compaction#474 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:57,311 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/6d2e5fdfd4f140c7b24a84d43e9e632c is 50, key is test_row_0/C:col10/1734237655604/Put/seqid=0 2024-12-15T04:40:57,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742395_1571 (size=31105) 2024-12-15T04:40:57,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742396_1572 (size=12561) 2024-12-15T04:40:57,318 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/6d2e5fdfd4f140c7b24a84d43e9e632c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6d2e5fdfd4f140c7b24a84d43e9e632c 2024-12-15T04:40:57,322 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/C of 3aea4ed87c2ca56ea7b1d05fdf98762f into 6d2e5fdfd4f140c7b24a84d43e9e632c(size=12.3 K), total size for store is 12.3 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:40:57,322 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:57,322 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/C, priority=13, startTime=1734237657274; duration=0sec 2024-12-15T04:40:57,322 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:57,322 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:C 2024-12-15T04:40:57,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-15T04:40:57,699 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/e3304fde291c4a089907b9097c810483 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e3304fde291c4a089907b9097c810483 2024-12-15T04:40:57,703 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/A of 3aea4ed87c2ca56ea7b1d05fdf98762f into e3304fde291c4a089907b9097c810483(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:40:57,703 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:57,703 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/A, priority=13, startTime=1734237657274; duration=0sec 2024-12-15T04:40:57,703 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:57,703 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:A 2024-12-15T04:40:57,713 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=207, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/74d555b74ddc4c01b773a755d48c41d4 2024-12-15T04:40:57,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/72d4e6abc2614bbd9ca61928945196d8 is 50, key is test_row_0/B:col10/1734237655610/Put/seqid=0 2024-12-15T04:40:57,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:57,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:57,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742397_1573 (size=12151) 2024-12-15T04:40:57,737 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/72d4e6abc2614bbd9ca61928945196d8 2024-12-15T04:40:57,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237717748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237717749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237717749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237717749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/c6c92f03d0934c81803bf30fda08d29c is 50, key is test_row_0/C:col10/1734237655610/Put/seqid=0 2024-12-15T04:40:57,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742398_1574 (size=12151) 2024-12-15T04:40:57,760 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/c6c92f03d0934c81803bf30fda08d29c 2024-12-15T04:40:57,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/74d555b74ddc4c01b773a755d48c41d4 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/74d555b74ddc4c01b773a755d48c41d4 2024-12-15T04:40:57,767 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/74d555b74ddc4c01b773a755d48c41d4, entries=150, sequenceid=207, filesize=30.4 K 2024-12-15T04:40:57,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/72d4e6abc2614bbd9ca61928945196d8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/72d4e6abc2614bbd9ca61928945196d8 2024-12-15T04:40:57,770 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 
{event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/72d4e6abc2614bbd9ca61928945196d8, entries=150, sequenceid=207, filesize=11.9 K 2024-12-15T04:40:57,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/c6c92f03d0934c81803bf30fda08d29c as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/c6c92f03d0934c81803bf30fda08d29c 2024-12-15T04:40:57,774 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/c6c92f03d0934c81803bf30fda08d29c, entries=150, sequenceid=207, filesize=11.9 K 2024-12-15T04:40:57,774 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 489ms, sequenceid=207, compaction requested=false 2024-12-15T04:40:57,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:57,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:57,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-12-15T04:40:57,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-12-15T04:40:57,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-12-15T04:40:57,776 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 794 msec 2024-12-15T04:40:57,777 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 797 msec 2024-12-15T04:40:57,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:57,852 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-12-15T04:40:57,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:40:57,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:57,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:40:57,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:57,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:40:57,852 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:40:57,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237717856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237717857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237717857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237717858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121582d1afe7a6fd4d70b0eaa6f11ca43ed3_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237657851/Put/seqid=0 2024-12-15T04:40:57,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742399_1575 (size=12304) 2024-12-15T04:40:57,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237717959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237717959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,960 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237717959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:57,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:57,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237717961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-12-15T04:40:58,083 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-12-15T04:40:58,084 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:40:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-12-15T04:40:58,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-15T04:40:58,085 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:40:58,086 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:40:58,086 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:40:58,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237718161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237718161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,163 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237718162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237718163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-15T04:40:58,236 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:58,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:58,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,237 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:58,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,264 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:58,266 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121582d1afe7a6fd4d70b0eaa6f11ca43ed3_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121582d1afe7a6fd4d70b0eaa6f11ca43ed3_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:40:58,268 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/9e8ea87539be40ef9fa8867fa64a45eb, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:58,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/9e8ea87539be40ef9fa8867fa64a45eb is 175, key is test_row_0/A:col10/1734237657851/Put/seqid=0 2024-12-15T04:40:58,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742400_1576 (size=31105) 2024-12-15T04:40:58,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-15T04:40:58,389 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:58,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237718463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237718464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237718465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237718467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,540 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:58,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:58,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:58,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237718641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,642 DEBUG [Thread-2318 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, 
exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:40:58,671 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/9e8ea87539be40ef9fa8867fa64a45eb 2024-12-15T04:40:58,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/9bdc354f44d94d75a6a59fc37a6b318d is 50, key is test_row_0/B:col10/1734237657851/Put/seqid=0 2024-12-15T04:40:58,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-15T04:40:58,692 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:58,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:58,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742401_1577 (size=12151) 2024-12-15T04:40:58,844 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:58,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:58,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:40:58,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237718965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,970 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237718969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237718970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:40:58,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237718971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,996 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:58,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:58,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:58,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:58,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:58,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,096 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/9bdc354f44d94d75a6a59fc37a6b318d 2024-12-15T04:40:59,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5bdebc7e23dc475d88beb181d6d02b84 is 50, key is test_row_0/C:col10/1734237657851/Put/seqid=0 2024-12-15T04:40:59,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742402_1578 (size=12151) 2024-12-15T04:40:59,148 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:59,149 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:59,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:59,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-15T04:40:59,301 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:59,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:59,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:59,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,301 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,453 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:40:59,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-12-15T04:40:59,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:40:59,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,454 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:40:59,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5bdebc7e23dc475d88beb181d6d02b84 2024-12-15T04:40:59,507 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/9e8ea87539be40ef9fa8867fa64a45eb as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9e8ea87539be40ef9fa8867fa64a45eb 2024-12-15T04:40:59,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9e8ea87539be40ef9fa8867fa64a45eb, entries=150, sequenceid=238, filesize=30.4 K 2024-12-15T04:40:59,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/9bdc354f44d94d75a6a59fc37a6b318d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9bdc354f44d94d75a6a59fc37a6b318d 2024-12-15T04:40:59,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,512 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,513 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9bdc354f44d94d75a6a59fc37a6b318d, entries=150, sequenceid=238, filesize=11.9 K 2024-12-15T04:40:59,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5bdebc7e23dc475d88beb181d6d02b84 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5bdebc7e23dc475d88beb181d6d02b84 2024-12-15T04:40:59,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5bdebc7e23dc475d88beb181d6d02b84, entries=150, sequenceid=238, filesize=11.9 K 2024-12-15T04:40:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1664ms, sequenceid=238, compaction requested=true 2024-12-15T04:40:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:40:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:59,517 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:40:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; 
Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:40:59,517 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:40:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:40:59,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,517 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:59,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,517 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:59,517 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:40:59,517 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/B is initiating minor compaction (all files) 2024-12-15T04:40:59,517 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/A is initiating minor compaction (all files) 2024-12-15T04:40:59,518 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/B in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,518 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/A in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:40:59,518 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e3304fde291c4a089907b9097c810483, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/74d555b74ddc4c01b773a755d48c41d4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9e8ea87539be40ef9fa8867fa64a45eb] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=91.5 K 2024-12-15T04:40:59,518 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c17f1db4df364815b805505df945a433, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/72d4e6abc2614bbd9ca61928945196d8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9bdc354f44d94d75a6a59fc37a6b318d] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=36.0 K 2024-12-15T04:40:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,518 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,518 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e3304fde291c4a089907b9097c810483, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/74d555b74ddc4c01b773a755d48c41d4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9e8ea87539be40ef9fa8867fa64a45eb] 2024-12-15T04:40:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,518 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c17f1db4df364815b805505df945a433, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237654490 2024-12-15T04:40:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,518 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3304fde291c4a089907b9097c810483, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237654490 2024-12-15T04:40:59,518 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 72d4e6abc2614bbd9ca61928945196d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734237655607 2024-12-15T04:40:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,518 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74d555b74ddc4c01b773a755d48c41d4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734237655607 2024-12-15T04:40:59,518 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bdc354f44d94d75a6a59fc37a6b318d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734237657748 2024-12-15T04:40:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,518 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9e8ea87539be40ef9fa8867fa64a45eb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734237657748 2024-12-15T04:40:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,523 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:59,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,525 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#B#compaction#481 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:59,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,525 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/523498be993f46c98e536dac6e4f37cd is 50, key is test_row_0/B:col10/1734237657851/Put/seqid=0 2024-12-15T04:40:59,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,528 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,529 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202412151566404fd8af4f81a1f6d06346738ade_3aea4ed87c2ca56ea7b1d05fdf98762f store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:59,531 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202412151566404fd8af4f81a1f6d06346738ade_3aea4ed87c2ca56ea7b1d05fdf98762f, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:59,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,531 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412151566404fd8af4f81a1f6d06346738ade_3aea4ed87c2ca56ea7b1d05fdf98762f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:40:59,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742403_1579 (size=12663) 2024-12-15T04:40:59,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-15T04:40:59,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-15T04:40:59,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742404_1580 (size=4469) 2024-12-15T04:40:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-15T04:40:59,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035
2024-12-15T04:40:59,606 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166
2024-12-15T04:40:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.
2024-12-15T04:40:59,606 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB
2024-12-15T04:40:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A
2024-12-15T04:40:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-15T04:40:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B
2024-12-15T04:40:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-15T04:40:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C
2024-12-15T04:40:59,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-12-15T04:40:59,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215a935ac18ea924f9aadf1d9a32ed1fe2e_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237657858/Put/seqid=0
2024-12-15T04:40:59,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742405_1581 (size=9814)
2024-12-15T04:40:59,649 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-15T04:40:59,652 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215a935ac18ea924f9aadf1d9a32ed1fe2e_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a935ac18ea924f9aadf1d9a32ed1fe2e_3aea4ed87c2ca56ea7b1d05fdf98762f
2024-12-15T04:40:59,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/db1ed16784784a51a29beb8c848e6a50, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f]
2024-12-15T04:40:59,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/db1ed16784784a51a29beb8c848e6a50 is 175, key is test_row_0/A:col10/1734237657858/Put/seqid=0
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742406_1582 (size=22461) 2024-12-15T04:40:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,673 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=246, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/db1ed16784784a51a29beb8c848e6a50 2024-12-15T04:40:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,675 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,679 DEBUG 
2024-12-15T04:40:59,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/bfe08495cbe6472cb7ff9ae905d4da39 is 50, key is test_row_0/B:col10/1734237657858/Put/seqid=0
2024-12-15T04:40:59,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742407_1583 (size=9757)
2024-12-15T04:40:59,685 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/bfe08495cbe6472cb7ff9ae905d4da39
2024-12-15T04:40:59,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5c1892a2173f46cb947aad45ea1fc947 is 50, key is test_row_0/C:col10/1734237657858/Put/seqid=0
2024-12-15T04:40:59,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742408_1584 (size=9757)
2024-12-15T04:40:59,712 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5c1892a2173f46cb947aad45ea1fc947
2024-12-15T04:40:59,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/db1ed16784784a51a29beb8c848e6a50 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/db1ed16784784a51a29beb8c848e6a50
2024-12-15T04:40:59,720 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/db1ed16784784a51a29beb8c848e6a50, entries=100, sequenceid=246, filesize=21.9 K
2024-12-15T04:40:59,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/bfe08495cbe6472cb7ff9ae905d4da39 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/bfe08495cbe6472cb7ff9ae905d4da39
2024-12-15T04:40:59,724 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/bfe08495cbe6472cb7ff9ae905d4da39, entries=100, sequenceid=246, filesize=9.5 K
2024-12-15T04:40:59,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5c1892a2173f46cb947aad45ea1fc947 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5c1892a2173f46cb947aad45ea1fc947
2024-12-15T04:40:59,728 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5c1892a2173f46cb947aad45ea1fc947, entries=100, sequenceid=246, filesize=9.5 K
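The Committing/Added pairs above record each flushed file being moved from the region's .tmp directory into its column family directory (A, B, C) and then registered with the store. The following is only an illustrative sketch of that move-into-place pattern on HDFS, not the actual HRegionFileSystem code; the paths are hypothetical placeholders and it assumes a reachable filesystem from fs.defaultFS:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitTmpFileSketch {
        public static void main(String[] args) throws Exception {
            // Illustrative only: move a flushed file from a region's .tmp area into the
            // column family directory, the same general pattern the "Committing ... as ..."
            // log lines describe. Paths below are hypothetical placeholders.
            Configuration conf = new Configuration();
            FileSystem fs = FileSystem.get(conf);
            Path tmpFile = new Path("/hbase/data/default/ExampleTable/exampleregion/.tmp/A/examplefile");
            Path storeFile = new Path("/hbase/data/default/ExampleTable/exampleregion/A/examplefile");
            if (!fs.rename(tmpFile, storeFile)) { // rename is a metadata-only move on HDFS
                throw new java.io.IOException("rename failed: " + tmpFile + " -> " + storeFile);
            }
            System.out.println("committed " + storeFile);
        }
    }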
2024-12-15T04:40:59,729 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 123ms, sequenceid=246, compaction requested=true
2024-12-15T04:40:59,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f:
2024-12-15T04:40:59,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.
2024-12-15T04:40:59,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166
2024-12-15T04:40:59,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=166
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-12-15T04:40:59,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6440 sec 2024-12-15T04:40:59,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,733 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.6480 sec 2024-12-15T04:40:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
2024-12-15T04:40:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,936 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/523498be993f46c98e536dac6e4f37cd as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/523498be993f46c98e536dac6e4f37cd 2024-12-15T04:40:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,940 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#A#compaction#480 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:40:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,940 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/edc94ddbe9084fe6b5982786b5214031 is 175, key is test_row_0/A:col10/1734237657851/Put/seqid=0 2024-12-15T04:40:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,942 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/B of 3aea4ed87c2ca56ea7b1d05fdf98762f into 523498be993f46c98e536dac6e4f37cd(size=12.4 K), total size for store is 21.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
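The recurring storefiletracker.StoreFileTrackerFactory(122) entries throughout this section show each RPC handler resolving the configured StoreFileTracker class and ending up with DefaultStoreFileTracker. Below is a minimal sketch of that reflection-based factory pattern; the interface, class, and property names are illustrative assumptions for the sketch, not the actual HBase source.

```java
import java.lang.reflect.Constructor;
import java.util.Properties;

interface FileTracker {
    String name();
}

class DefaultFileTracker implements FileTracker {
    public String name() { return "default"; }
}

public class TrackerFactory {
    // Resolve the implementation class from configuration (falling back to the
    // default) and instantiate it reflectively, as the DEBUG lines above describe.
    static FileTracker create(Properties conf) throws Exception {
        String impl = conf.getProperty("store.file-tracker.impl",
                DefaultFileTracker.class.getName());
        Class<?> clazz = Class.forName(impl);
        Constructor<?> ctor = clazz.getDeclaredConstructor();
        return (FileTracker) ctor.newInstance();
    }

    public static void main(String[] args) throws Exception {
        Properties conf = new Properties(); // no override -> default implementation
        System.out.println(TrackerFactory.create(conf).name());
    }
}
```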
2024-12-15T04:40:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,942 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:40:59,942 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/B, priority=13, startTime=1734237659517; duration=0sec 2024-12-15T04:40:59,942 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:40:59,942 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:B 2024-12-15T04:40:59,942 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:40:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,944 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46620 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:40:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,944 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/C is initiating minor compaction (all files) 2024-12-15T04:40:59,944 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/C in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:40:59,944 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6d2e5fdfd4f140c7b24a84d43e9e632c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/c6c92f03d0934c81803bf30fda08d29c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5bdebc7e23dc475d88beb181d6d02b84, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5c1892a2173f46cb947aad45ea1fc947] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=45.5 K 2024-12-15T04:40:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,944 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d2e5fdfd4f140c7b24a84d43e9e632c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1734237654490 2024-12-15T04:40:59,944 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting c6c92f03d0934c81803bf30fda08d29c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1734237655607 2024-12-15T04:40:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,945 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bdebc7e23dc475d88beb181d6d02b84, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=238, earliestPutTs=1734237657748 2024-12-15T04:40:59,945 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c1892a2173f46cb947aad45ea1fc947, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1734237657853 2024-12-15T04:40:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742409_1585 (size=31617) 2024-12-15T04:40:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
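The compactions.ExploringCompactionPolicy(116) record a few lines above ("selected 4 files of size 46620 ... after considering 3 permutations with 3 in ratio") reflects a ratio-based eligibility check: a window of candidate files is acceptable when no single file is much larger than the rest combined. A small sketch of such a check, with method names and byte counts chosen for illustration rather than copied from the HBase policy code:

```java
import java.util.Arrays;
import java.util.List;

public class RatioCheck {
    // A window of candidate files is "in ratio" when no single file exceeds
    // `ratio` times the combined size of the other files in the window.
    static boolean inRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes approximating the four C-family files listed in the log.
        List<Long> window = Arrays.asList(12_595L, 12_186L, 12_186L, 9_653L);
        System.out.println(inRatio(window, 1.2)); // true: the files are similarly sized
    }
}
```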
2024-12-15T04:40:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
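The throttle.PressureAwareThroughputController(145) record earlier in this section reports an average compaction throughput of 0.06 MB/second against a 50.00 MB/second limit and how long the compaction slept to stay under it. The sketch below shows the general idea of that kind of limiter, sleeping just long enough to keep a writer under a bytes-per-second budget; the class and its structure are assumptions for illustration, not the HBase controller itself.

```java
public class ThroughputLimiter {
    private final double limitBytesPerSec;
    private long bytesSinceStart = 0;
    private final long startNanos = System.nanoTime();

    ThroughputLimiter(double limitBytesPerSec) {
        this.limitBytesPerSec = limitBytesPerSec;
    }

    // Record `written` bytes and sleep if the running average would exceed the limit.
    void control(long written) throws InterruptedException {
        bytesSinceStart += written;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecForBytes = bytesSinceStart / limitBytesPerSec;
        long sleepMs = (long) ((minSecForBytes - elapsedSec) * 1000);
        if (sleepMs > 0) {
            Thread.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputLimiter limiter = new ThroughputLimiter(50.0 * 1024 * 1024); // 50 MB/s
        for (int i = 0; i < 10; i++) {
            limiter.control(1024 * 1024); // pretend 1 MB was just written
        }
        System.out.println("done");
    }
}
```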
2024-12-15T04:40:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
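The regionserver.HRegionFileSystem(442) record earlier ("Committing .../.tmp/B/523498be993f46c98e536dac6e4f37cd as .../B/523498be993f46c98e536dac6e4f37cd") shows the compaction output being moved from the region's .tmp directory into the B column-family directory. A minimal sketch of that commit step with the Hadoop FileSystem API, run against the local filesystem with placeholder paths; it illustrates the rename-based commit rather than reproducing the actual HRegionFileSystem logic.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CommitStoreFile {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf); // local FS stands in for HDFS here

        Path tmp = new Path("/tmp/region/.tmp/B/523498be993f46c98e536dac6e4f37cd");
        Path dst = new Path("/tmp/region/B/523498be993f46c98e536dac6e4f37cd");

        fs.mkdirs(tmp.getParent());
        fs.create(tmp).close();          // stand-in for the file produced by the compaction
        fs.mkdirs(dst.getParent());

        // The commit itself is a rename from the temporary location into the store directory.
        if (!fs.rename(tmp, dst)) {
            throw new java.io.IOException("failed to commit " + tmp + " as " + dst);
        }
        System.out.println("committed " + dst);
    }
}
```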
2024-12-15T04:40:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
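The compactions.Compactor(224) records above list the four input files for the C-family compaction together with their key counts, sizes, and sequence numbers. At its core the compaction is a k-way merge of already-sorted files into one new sorted file. The sketch below performs that merge over in-memory string lists standing in for HFile scanners; it is purely illustrative and omits version and delete handling.

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMerge {
    // Merge several individually sorted key lists into one sorted output,
    // the way a compaction merges sorted store files into a single file.
    static List<String> merge(List<List<String>> sortedInputs) {
        // Heap entries are (input index, position within that input).
        PriorityQueue<int[]> heap = new PriorityQueue<>(
            Comparator.comparing((int[] e) -> sortedInputs.get(e[0]).get(e[1])));
        for (int i = 0; i < sortedInputs.size(); i++) {
            if (!sortedInputs.get(i).isEmpty()) {
                heap.add(new int[]{i, 0});
            }
        }
        List<String> out = new ArrayList<>();
        while (!heap.isEmpty()) {
            int[] top = heap.poll();
            List<String> src = sortedInputs.get(top[0]);
            out.add(src.get(top[1]));
            if (top[1] + 1 < src.size()) {
                heap.add(new int[]{top[0], top[1] + 1});
            }
        }
        return out;
    }

    public static void main(String[] args) {
        List<List<String>> inputs = Arrays.asList(
            Arrays.asList("row0", "row3"),
            Arrays.asList("row1", "row4"),
            Arrays.asList("row2"));
        System.out.println(merge(inputs)); // [row0, row1, row2, row3, row4]
    }
}
```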
2024-12-15T04:40:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-12-15T04:40:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
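The DEBUG record above (emitted many times in quick succession by the RPC handler threads) shows StoreFileTrackerFactory resolving the tracker implementation to DefaultStoreFileTracker for each store it touches. As a hedged illustration only, not something this test run does, the sketch below shows how a table could request a specific store file tracker at creation time; it assumes the configuration key "hbase.store.file-tracker.impl" and the FILE tracker value, both of which should be verified against the HBase release in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Assumption: "hbase.store.file-tracker.impl" selects the tracker implementation
      // (e.g. DEFAULT or FILE); check the key and supported values for your HBase version.
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setValue("hbase.store.file-tracker.impl", "FILE")
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(builder.build());
    }
  }
}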
2024-12-15T04:40:59,965 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#C#compaction#485 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-12-15T04:40:59,965 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5356cdedce084654aba027742293e1e7 is 50, key is test_row_0/C:col10/1734237657858/Put/seqid=0
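The INFO line from the long-compactions thread reports throughput-controller statistics for the C-family compaction: 6.55 MB/second observed against a 50.00 MB/second limit, with zero sleeps needed. The sketch below is a deliberately simplified, self-contained illustration of that throttling idea, not HBase's PressureAwareThroughputController: it sleeps whenever the cumulative write rate climbs above a configured cap.

public class SimpleThroughputLimiterSketch {
  private final double maxBytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long totalBytes;

  SimpleThroughputLimiterSketch(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  /** Record `bytes` just written and sleep if the average rate exceeds the cap. */
  void control(long bytes) throws InterruptedException {
    totalBytes += bytes;
    double elapsedSeconds = (System.nanoTime() - startNanos) / 1_000_000_000.0;
    double minSecondsForBytes = totalBytes / maxBytesPerSecond;
    long sleepMs = (long) ((minSecondsForBytes - elapsedSeconds) * 1000);
    if (sleepMs > 0) {
      Thread.sleep(sleepMs); // pace the writer until the rate falls back under the cap
    }
  }

  public static void main(String[] args) throws InterruptedException {
    // Cap at 50 MB/s, mirroring the "total limit is 50.00 MB/second" reported above.
    SimpleThroughputLimiterSketch limiter = new SimpleThroughputLimiterSketch(50 * 1024 * 1024);
    for (int i = 0; i < 10; i++) {
      limiter.control(16 * 1024 * 1024); // pretend each iteration wrote a 16 MB chunk
    }
  }
}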
2024-12-15T04:40:59,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742410_1586 (size=12697)
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:40:59,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:41:00,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:41:00,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:00,001 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-12-15T04:41:00,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:41:00,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:00,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:41:00,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:00,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:41:00,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:00,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215cea4176499714d548985bcdc2ebaba91_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237660000/Put/seqid=0 2024-12-15T04:41:00,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237720023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742411_1587 (size=12404) 2024-12-15T04:41:00,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237720023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237720027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237720028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237720129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237720129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237720131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237720131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-12-15T04:41:00,189 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-12-15T04:41:00,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-12-15T04:41:00,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-12-15T04:41:00,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T04:41:00,191 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-12-15T04:41:00,191 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-15T04:41:00,191 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-15T04:41:00,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T04:41:00,333 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237720331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,333 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237720332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,335 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237720333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,336 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237720334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,342 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,342 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:00,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:00,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,343 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:41:00,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,356 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/edc94ddbe9084fe6b5982786b5214031 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/edc94ddbe9084fe6b5982786b5214031 2024-12-15T04:41:00,359 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/A of 3aea4ed87c2ca56ea7b1d05fdf98762f into edc94ddbe9084fe6b5982786b5214031(size=30.9 K), total size for store is 52.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:41:00,359 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:00,359 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/A, priority=13, startTime=1734237659517; duration=0sec 2024-12-15T04:41:00,359 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:00,359 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:A 2024-12-15T04:41:00,380 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/5356cdedce084654aba027742293e1e7 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5356cdedce084654aba027742293e1e7 2024-12-15T04:41:00,383 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/C of 3aea4ed87c2ca56ea7b1d05fdf98762f into 5356cdedce084654aba027742293e1e7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-12-15T04:41:00,383 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:00,383 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/C, priority=12, startTime=1734237659517; duration=0sec 2024-12-15T04:41:00,383 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:00,383 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:C 2024-12-15T04:41:00,429 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:41:00,431 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215cea4176499714d548985bcdc2ebaba91_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215cea4176499714d548985bcdc2ebaba91_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:00,432 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/813dbf433b16463b8a69712f783cf944, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:00,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/813dbf433b16463b8a69712f783cf944 is 175, key is test_row_0/A:col10/1734237660000/Put/seqid=0 2024-12-15T04:41:00,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742412_1588 (size=31205) 2024-12-15T04:41:00,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T04:41:00,494 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,494 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:00,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:41:00,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:00,494 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,495 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:41:00,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,541 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-15T04:41:00,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,636 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237720634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237720635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237720637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,639 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:00,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237720638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,646 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,646 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:00,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:00,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,646 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:41:00,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T04:41:00,798 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,798 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:00,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:00,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,799 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:41:00,836 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=258, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/813dbf433b16463b8a69712f783cf944 2024-12-15T04:41:00,841 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/5e7734fa8da34e3b89a3a00fcdb3d507 is 50, key is test_row_0/B:col10/1734237660000/Put/seqid=0 2024-12-15T04:41:00,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742413_1589 (size=12251) 2024-12-15T04:41:00,950 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:00,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:00,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:00,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:00,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-15T04:41:00,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:00,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,102 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:01,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:01,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:01,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237721139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:01,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237721139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:01,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237721141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:01,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237721143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,244 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/5e7734fa8da34e3b89a3a00fcdb3d507 2024-12-15T04:41:01,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/ff88905101094519b9b7205ce02d78b8 is 50, key is test_row_0/C:col10/1734237660000/Put/seqid=0 2024-12-15T04:41:01,251 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742414_1590 (size=12251) 2024-12-15T04:41:01,254 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:01,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:01,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T04:41:01,406 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,407 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:01,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:01,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,559 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,559 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:01,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:01,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,560 ERROR [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-15T04:41:01,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/ff88905101094519b9b7205ce02d78b8 2024-12-15T04:41:01,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/813dbf433b16463b8a69712f783cf944 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/813dbf433b16463b8a69712f783cf944 2024-12-15T04:41:01,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/813dbf433b16463b8a69712f783cf944, entries=150, sequenceid=258, filesize=30.5 K 2024-12-15T04:41:01,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/5e7734fa8da34e3b89a3a00fcdb3d507 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/5e7734fa8da34e3b89a3a00fcdb3d507 2024-12-15T04:41:01,660 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/5e7734fa8da34e3b89a3a00fcdb3d507, entries=150, 
sequenceid=258, filesize=12.0 K 2024-12-15T04:41:01,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/ff88905101094519b9b7205ce02d78b8 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ff88905101094519b9b7205ce02d78b8 2024-12-15T04:41:01,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ff88905101094519b9b7205ce02d78b8, entries=150, sequenceid=258, filesize=12.0 K 2024-12-15T04:41:01,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1662ms, sequenceid=258, compaction requested=true 2024-12-15T04:41:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:41:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:01,664 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:41:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:41:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:41:01,664 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:41:01,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:41:01,664 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:41:01,664 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/A is initiating minor compaction (all files) 2024-12-15T04:41:01,664 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/A in 
TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,665 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/edc94ddbe9084fe6b5982786b5214031, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/db1ed16784784a51a29beb8c848e6a50, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/813dbf433b16463b8a69712f783cf944] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=83.3 K 2024-12-15T04:41:01,665 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/edc94ddbe9084fe6b5982786b5214031, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/db1ed16784784a51a29beb8c848e6a50, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/813dbf433b16463b8a69712f783cf944] 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34671 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting edc94ddbe9084fe6b5982786b5214031, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734237657748 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/B is initiating minor compaction (all files) 2024-12-15T04:41:01,665 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/B in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:41:01,665 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/523498be993f46c98e536dac6e4f37cd, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/bfe08495cbe6472cb7ff9ae905d4da39, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/5e7734fa8da34e3b89a3a00fcdb3d507] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=33.9 K 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting db1ed16784784a51a29beb8c848e6a50, keycount=100, bloomtype=ROW, size=21.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1734237657853 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 523498be993f46c98e536dac6e4f37cd, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1734237657748 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 813dbf433b16463b8a69712f783cf944, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237659997 2024-12-15T04:41:01,665 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting bfe08495cbe6472cb7ff9ae905d4da39, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1734237657853 2024-12-15T04:41:01,666 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e7734fa8da34e3b89a3a00fcdb3d507, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237659997 2024-12-15T04:41:01,669 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:01,670 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#B#compaction#489 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:41:01,670 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/34e061923d65442d90fef23347fc5c02 is 50, key is test_row_0/B:col10/1734237660000/Put/seqid=0 2024-12-15T04:41:01,671 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241215fec3ecb3f8a644bbb3badb0c310672c2_3aea4ed87c2ca56ea7b1d05fdf98762f store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:01,672 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241215fec3ecb3f8a644bbb3badb0c310672c2_3aea4ed87c2ca56ea7b1d05fdf98762f, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:01,672 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215fec3ecb3f8a644bbb3badb0c310672c2_3aea4ed87c2ca56ea7b1d05fdf98762f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:01,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742415_1591 (size=12865) 2024-12-15T04:41:01,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742416_1592 (size=4469) 2024-12-15T04:41:01,711 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:01,711 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43199 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-12-15T04:41:01,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
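The flush procedure that follows (pid=168) writes all three column families A, B and C of TestAcidGuarantees. For reference, the kind of client write that fills those memstores looks roughly like the sketch below; the row, qualifier and value are illustrative stand-ins matching the keys seen in this log, and the real AcidGuaranteesTestTool writer differs in detail:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      byte[] value = Bytes.toBytes("example-value"); // illustrative payload
      // Write the same value into every family so a concurrent reader can check
      // that a row is seen either entirely before or entirely after the put.
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
      }
      table.put(put);
    }
  }
}
```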
2024-12-15T04:41:01,712 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-12-15T04:41:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:41:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:41:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:41:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:01,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412154d055555fdfb442b9524014f931d4d91_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237660021/Put/seqid=0 2024-12-15T04:41:01,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742417_1593 (size=12454) 2024-12-15T04:41:02,081 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/34e061923d65442d90fef23347fc5c02 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/34e061923d65442d90fef23347fc5c02 2024-12-15T04:41:02,081 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#A#compaction#490 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:41:02,081 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/9f94ffd5c36f4e9fa6451099135d9591 is 175, key is test_row_0/A:col10/1734237660000/Put/seqid=0 2024-12-15T04:41:02,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742418_1594 (size=31819) 2024-12-15T04:41:02,085 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/B of 3aea4ed87c2ca56ea7b1d05fdf98762f into 34e061923d65442d90fef23347fc5c02(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:41:02,085 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:02,085 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/B, priority=13, startTime=1734237661664; duration=0sec 2024-12-15T04:41:02,085 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:41:02,085 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:B 2024-12-15T04:41:02,086 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-12-15T04:41:02,086 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-12-15T04:41:02,086 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-12-15T04:41:02,086 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
because compaction request was cancelled 2024-12-15T04:41:02,086 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:C 2024-12-15T04:41:02,088 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/9f94ffd5c36f4e9fa6451099135d9591 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9f94ffd5c36f4e9fa6451099135d9591 2024-12-15T04:41:02,092 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/A of 3aea4ed87c2ca56ea7b1d05fdf98762f into 9f94ffd5c36f4e9fa6451099135d9591(size=31.1 K), total size for store is 31.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:41:02,092 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:02,092 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/A, priority=13, startTime=1734237661664; duration=0sec 2024-12-15T04:41:02,092 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:02,092 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:A 2024-12-15T04:41:02,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:41:02,122 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202412154d055555fdfb442b9524014f931d4d91_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412154d055555fdfb442b9524014f931d4d91_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/08b35907c72f4ab6b2f1f585f6f492fa, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:02,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/08b35907c72f4ab6b2f1f585f6f492fa is 175, key is test_row_0/A:col10/1734237660021/Put/seqid=0 2024-12-15T04:41:02,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742419_1595 (size=31255) 2024-12-15T04:41:02,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:02,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. as already flushing 2024-12-15T04:41:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237722152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33482 deadline: 1734237722153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237722153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,154 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237722153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237722255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237722255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237722255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T04:41:02,449 DEBUG [Thread-2327 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x15b6349f to 127.0.0.1:55935 2024-12-15T04:41:02,449 DEBUG [Thread-2327 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:02,449 DEBUG [Thread-2321 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:55935 2024-12-15T04:41:02,449 DEBUG [Thread-2321 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:02,450 DEBUG [Thread-2323 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e8cd1ae to 127.0.0.1:55935 2024-12-15T04:41:02,450 DEBUG [Thread-2323 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:02,450 DEBUG [Thread-2329 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x439b60d5 to 127.0.0.1:55935 2024-12-15T04:41:02,450 DEBUG [Thread-2329 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:02,451 DEBUG [Thread-2325 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d832d43 to 127.0.0.1:55935 2024-12-15T04:41:02,451 DEBUG [Thread-2325 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:02,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237722457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237722457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237722458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,528 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=285, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/08b35907c72f4ab6b2f1f585f6f492fa 2024-12-15T04:41:02,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/b255e649c193491f859db79e504b048f is 50, key is test_row_0/B:col10/1734237660021/Put/seqid=0 2024-12-15T04:41:02,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742420_1596 (size=12301) 2024-12-15T04:41:02,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33504 deadline: 1734237722673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,675 DEBUG [Thread-2318 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8174 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., hostname=e56de37b85b3,43199,1734237482035, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-15T04:41:02,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237722759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,760 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:02,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237722759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237722760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:02,942 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/b255e649c193491f859db79e504b048f 2024-12-15T04:41:02,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/d71cd4d779904bdd84721d217d8adbec is 50, key is test_row_0/C:col10/1734237660021/Put/seqid=0 2024-12-15T04:41:02,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742421_1597 (size=12301) 2024-12-15T04:41:03,264 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:03,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33516 deadline: 1734237723264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:03,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33476 deadline: 1734237723265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:03,267 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-15T04:41:03,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33524 deadline: 1734237723267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:03,358 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/d71cd4d779904bdd84721d217d8adbec 2024-12-15T04:41:03,361 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/08b35907c72f4ab6b2f1f585f6f492fa as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/08b35907c72f4ab6b2f1f585f6f492fa 2024-12-15T04:41:03,364 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/08b35907c72f4ab6b2f1f585f6f492fa, entries=150, sequenceid=285, filesize=30.5 K 2024-12-15T04:41:03,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/b255e649c193491f859db79e504b048f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b255e649c193491f859db79e504b048f 2024-12-15T04:41:03,367 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b255e649c193491f859db79e504b048f, entries=150, sequenceid=285, filesize=12.0 K 2024-12-15T04:41:03,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/d71cd4d779904bdd84721d217d8adbec as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/d71cd4d779904bdd84721d217d8adbec 2024-12-15T04:41:03,370 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/d71cd4d779904bdd84721d217d8adbec, entries=150, sequenceid=285, filesize=12.0 K 2024-12-15T04:41:03,371 INFO [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1660ms, sequenceid=285, compaction requested=true 2024-12-15T04:41:03,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:03,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
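The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server refusing new writes once the region's memstore passes its blocking size; in stock HBase that blocking size is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and this run evidently uses a very small flush size so that flushes and write blocking are exercised aggressively. The exact values configured for this test are not visible in the log, so the snippet below is only a sketch of how such a limit could be set programmatically; the 128 KB flush size and 4x multiplier are illustrative assumptions chosen to match the 512 K figure in the message, not values taken from the test.

// Sketch only: configure a small memstore blocking limit like the one that produced
// the RegionTooBusyException entries above. The property keys are standard HBase
// configuration names; the numeric values are assumptions for illustration.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches ~128 KB (assumed value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block writes once the memstore exceeds flush.size * multiplier,
        // i.e. ~512 KB here, matching the "Over memstore limit=512.0 K" messages.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
    }
}

Writes rejected this way normally surface to the client as retriable exceptions, which is why the same handlers keep retrying the Mutate calls in the entries above until the flush completes and the memstore drains.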
2024-12-15T04:41:03,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/e56de37b85b3:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-15T04:41:03,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-15T04:41:03,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-15T04:41:03,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.1810 sec 2024-12-15T04:41:03,374 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 3.1830 sec 2024-12-15T04:41:04,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=43199 {}] regionserver.HRegion(8581): Flush requested on 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:04,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-12-15T04:41:04,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:41:04,167 DEBUG [Thread-2316 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:55935 2024-12-15T04:41:04,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:04,167 DEBUG [Thread-2316 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:04,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:41:04,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:04,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:41:04,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:04,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215a16f196678384850ada6cff2d5ff3734_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237662149/Put/seqid=0 2024-12-15T04:41:04,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742422_1598 (size=12454) 2024-12-15T04:41:04,273 DEBUG [Thread-2312 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d7fe431 to 127.0.0.1:55935 2024-12-15T04:41:04,273 DEBUG [Thread-2312 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:04,276 DEBUG [Thread-2310 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11a52cdf to 127.0.0.1:55935 2024-12-15T04:41:04,276 DEBUG [Thread-2310 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:04,280 DEBUG [Thread-2314 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x091d72db to 127.0.0.1:55935 2024-12-15T04:41:04,281 
DEBUG [Thread-2314 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:04,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-15T04:41:04,295 INFO [Thread-2320 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-12-15T04:41:04,582 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:41:04,585 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215a16f196678384850ada6cff2d5ff3734_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a16f196678384850ada6cff2d5ff3734_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:04,586 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/0c62452ba27343779edc557b40897250, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:04,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/0c62452ba27343779edc557b40897250 is 175, key is test_row_0/A:col10/1734237662149/Put/seqid=0 2024-12-15T04:41:04,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742423_1599 (size=31255) 2024-12-15T04:41:04,992 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=299, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/0c62452ba27343779edc557b40897250 2024-12-15T04:41:05,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/b2029f2264aa4905ae8e951038539dc2 is 50, key is test_row_0/B:col10/1734237662149/Put/seqid=0 2024-12-15T04:41:05,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742424_1600 (size=12301) 2024-12-15T04:41:05,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/b2029f2264aa4905ae8e951038539dc2 2024-12-15T04:41:05,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/08467e57358e4cb1983e3a9dc57521a6 is 50, key is test_row_0/C:col10/1734237662149/Put/seqid=0 2024-12-15T04:41:05,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742425_1601 (size=12301) 2024-12-15T04:41:05,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=299 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/08467e57358e4cb1983e3a9dc57521a6 2024-12-15T04:41:05,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/0c62452ba27343779edc557b40897250 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0c62452ba27343779edc557b40897250 2024-12-15T04:41:05,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0c62452ba27343779edc557b40897250, entries=150, sequenceid=299, filesize=30.5 K 2024-12-15T04:41:05,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/b2029f2264aa4905ae8e951038539dc2 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b2029f2264aa4905ae8e951038539dc2 2024-12-15T04:41:05,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b2029f2264aa4905ae8e951038539dc2, entries=150, sequenceid=299, filesize=12.0 K 2024-12-15T04:41:05,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/08467e57358e4cb1983e3a9dc57521a6 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/08467e57358e4cb1983e3a9dc57521a6 2024-12-15T04:41:05,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/08467e57358e4cb1983e3a9dc57521a6, entries=150, sequenceid=299, filesize=12.0 K 2024-12-15T04:41:05,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=20.13 KB/20610 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1679ms, sequenceid=299, compaction requested=true 2024-12-15T04:41:05,845 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:05,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:A, priority=-2147483648, current under compaction store size is 1 2024-12-15T04:41:05,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:05,845 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:41:05,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:B, priority=-2147483648, current under compaction store size is 2 2024-12-15T04:41:05,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:05,845 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-12-15T04:41:05,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3aea4ed87c2ca56ea7b1d05fdf98762f:C, priority=-2147483648, current under compaction store size is 3 2024-12-15T04:41:05,845 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94329 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/A is initiating minor compaction (all files) 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/B is initiating minor compaction (all files) 2024-12-15T04:41:05,846 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/A in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:05,846 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/B in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
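The "Exploring compaction algorithm has selected N files ... with 1 in ratio" lines above come from HBase's ExploringCompactionPolicy, which scans candidate windows of store files and keeps only selections whose files are mutually "in ratio", meaning no single file is larger than the configured compaction ratio (commonly 1.2 by default) times the combined size of the other files in the selection. The method below is a simplified, self-contained sketch of that ratio test; it is not the actual ExploringCompactionPolicy implementation, and the file sizes in main are taken loosely from the B-store selection logged above.

// Simplified sketch of the "files in ratio" test used when exploring compaction
// candidates: a selection is acceptable only if every file's size is at most
// ratio * (sum of the other files in the selection).
import java.util.List;

public class FilesInRatioSketch {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true; // a single file is trivially in ratio
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the B-store selection above: ~12.6 K, 12.0 K and 12.0 K files.
        System.out.println(filesInRatio(List.of(12_900L, 12_301L, 12_301L), 1.2)); // true
        // A skewed selection where one file dwarfs the rest fails the test.
        System.out.println(filesInRatio(List.of(1_000_000L, 12_000L, 12_000L), 1.2)); // false
    }
}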
2024-12-15T04:41:05,846 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9f94ffd5c36f4e9fa6451099135d9591, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/08b35907c72f4ab6b2f1f585f6f492fa, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0c62452ba27343779edc557b40897250] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=92.1 K 2024-12-15T04:41:05,846 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/34e061923d65442d90fef23347fc5c02, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b255e649c193491f859db79e504b048f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b2029f2264aa4905ae8e951038539dc2] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=36.6 K 2024-12-15T04:41:05,846 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
files: [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9f94ffd5c36f4e9fa6451099135d9591, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/08b35907c72f4ab6b2f1f585f6f492fa, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0c62452ba27343779edc557b40897250] 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34e061923d65442d90fef23347fc5c02, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237659997 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f94ffd5c36f4e9fa6451099135d9591, keycount=150, bloomtype=ROW, size=31.1 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237659997 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b255e649c193491f859db79e504b048f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734237660021 2024-12-15T04:41:05,846 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 08b35907c72f4ab6b2f1f585f6f492fa, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734237660021 2024-12-15T04:41:05,847 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2029f2264aa4905ae8e951038539dc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237662149 2024-12-15T04:41:05,847 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c62452ba27343779edc557b40897250, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237662149 2024-12-15T04:41:05,851 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:05,851 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#B#compaction#497 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:41:05,852 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/1a710dcff3bf489caf9a5c34faca271e is 50, key is test_row_0/B:col10/1734237662149/Put/seqid=0 2024-12-15T04:41:05,853 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241215b2ac175e95104e70a5eb32f4f6718b63_3aea4ed87c2ca56ea7b1d05fdf98762f store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:05,854 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241215b2ac175e95104e70a5eb32f4f6718b63_3aea4ed87c2ca56ea7b1d05fdf98762f, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:05,854 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241215b2ac175e95104e70a5eb32f4f6718b63_3aea4ed87c2ca56ea7b1d05fdf98762f because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:05,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742426_1602 (size=13017) 2024-12-15T04:41:05,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742427_1603 (size=4469) 2024-12-15T04:41:06,258 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#A#compaction#498 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:41:06,259 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/0305c80097344be3a65aefe5daa38340 is 175, key is test_row_0/A:col10/1734237662149/Put/seqid=0 2024-12-15T04:41:06,264 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/1a710dcff3bf489caf9a5c34faca271e as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/1a710dcff3bf489caf9a5c34faca271e 2024-12-15T04:41:06,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742428_1604 (size=31971) 2024-12-15T04:41:06,269 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/B of 3aea4ed87c2ca56ea7b1d05fdf98762f into 1a710dcff3bf489caf9a5c34faca271e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:41:06,269 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:06,269 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/B, priority=13, startTime=1734237665845; duration=0sec 2024-12-15T04:41:06,269 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-12-15T04:41:06,269 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:B 2024-12-15T04:41:06,269 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-12-15T04:41:06,270 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49550 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-12-15T04:41:06,270 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1540): 3aea4ed87c2ca56ea7b1d05fdf98762f/C is initiating minor compaction (all files) 2024-12-15T04:41:06,270 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3aea4ed87c2ca56ea7b1d05fdf98762f/C in TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:41:06,270 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5356cdedce084654aba027742293e1e7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ff88905101094519b9b7205ce02d78b8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/d71cd4d779904bdd84721d217d8adbec, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/08467e57358e4cb1983e3a9dc57521a6] into tmpdir=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp, totalSize=48.4 K 2024-12-15T04:41:06,270 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5356cdedce084654aba027742293e1e7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1734237657748 2024-12-15T04:41:06,271 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff88905101094519b9b7205ce02d78b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1734237659997 2024-12-15T04:41:06,271 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting d71cd4d779904bdd84721d217d8adbec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1734237660021 2024-12-15T04:41:06,271 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] compactions.Compactor(224): Compacting 08467e57358e4cb1983e3a9dc57521a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=299, earliestPutTs=1734237662149 2024-12-15T04:41:06,277 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3aea4ed87c2ca56ea7b1d05fdf98762f#C#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-12-15T04:41:06,277 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/432064b33ab748599964f2ab5a86d701 is 50, key is test_row_0/C:col10/1734237662149/Put/seqid=0 2024-12-15T04:41:06,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742429_1605 (size=12983) 2024-12-15T04:41:06,676 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/0305c80097344be3a65aefe5daa38340 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0305c80097344be3a65aefe5daa38340 2024-12-15T04:41:06,682 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/A of 3aea4ed87c2ca56ea7b1d05fdf98762f into 0305c80097344be3a65aefe5daa38340(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-12-15T04:41:06,683 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:06,683 INFO [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/A, priority=13, startTime=1734237665845; duration=0sec 2024-12-15T04:41:06,683 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:06,683 DEBUG [RS:0;e56de37b85b3:43199-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:A 2024-12-15T04:41:06,686 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/432064b33ab748599964f2ab5a86d701 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/432064b33ab748599964f2ab5a86d701 2024-12-15T04:41:06,690 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 3aea4ed87c2ca56ea7b1d05fdf98762f/C of 3aea4ed87c2ca56ea7b1d05fdf98762f into 432064b33ab748599964f2ab5a86d701(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
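With the C-family compaction just above committed, the AcidGuaranteesTestTool run winds down below and prints its writer and reader counters before the table is disabled. The invariant the tool's readers check is row-level atomicity: every cell returned for a row, across the A, B and C families, must carry the same value, because each writer mutation sets all families of a row together. The snippet below is a standalone sketch of such a check against a Table handle; the helper name and failure message are illustrative and not taken from the tool's source.

// Sketch of a per-row atomicity check: fetch a row and verify that every returned
// cell holds the same value across families. Names here are illustrative assumptions.
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicRowCheckSketch {
    static void checkRowIsAtomic(Table table, byte[] row) throws IOException {
        Result result = table.get(new Get(row));
        byte[] expected = null;
        for (Cell cell : result.rawCells()) {
            byte[] value = CellUtil.cloneValue(cell);
            if (expected == null) {
                expected = value;
            } else if (!Arrays.equals(expected, value)) {
                throw new AssertionError("Row " + Bytes.toString(row)
                    + " was read in a torn state: cell values differ across families");
            }
        }
    }
}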
2024-12-15T04:41:06,690 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:06,690 INFO [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f., storeName=3aea4ed87c2ca56ea7b1d05fdf98762f/C, priority=12, startTime=1734237665845; duration=0sec 2024-12-15T04:41:06,690 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-12-15T04:41:06,690 DEBUG [RS:0;e56de37b85b3:43199-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3aea4ed87c2ca56ea7b1d05fdf98762f:C 2024-12-15T04:41:12,692 DEBUG [Thread-2318 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:55935 2024-12-15T04:41:12,692 DEBUG [Thread-2318 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 32 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8083 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8156 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8139 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8092 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 8184 2024-12-15T04:41:12,692 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-12-15T04:41:12,693 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T04:41:12,693 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b5141 to 127.0.0.1:55935 2024-12-15T04:41:12,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:12,693 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-12-15T04:41:12,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-12-15T04:41:12,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-12-15T04:41:12,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-15T04:41:12,698 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237672697"}]},"ts":"1734237672697"} 2024-12-15T04:41:12,699 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-12-15T04:41:12,745 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-12-15T04:41:12,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-12-15T04:41:12,750 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, UNASSIGN}] 2024-12-15T04:41:12,751 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, UNASSIGN 2024-12-15T04:41:12,753 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=CLOSING, regionLocation=e56de37b85b3,43199,1734237482035 2024-12-15T04:41:12,754 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-15T04:41:12,754 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; CloseRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035}] 2024-12-15T04:41:12,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-15T04:41:12,907 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to e56de37b85b3,43199,1734237482035 2024-12-15T04:41:12,907 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(124): Close 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:12,907 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-15T04:41:12,907 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1681): Closing 3aea4ed87c2ca56ea7b1d05fdf98762f, disabling compactions & flushes 2024-12-15T04:41:12,907 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
after waiting 0 ms 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 2024-12-15T04:41:12,908 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(2837): Flushing 3aea4ed87c2ca56ea7b1d05fdf98762f 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=A 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=B 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3aea4ed87c2ca56ea7b1d05fdf98762f, store=C 2024-12-15T04:41:12,908 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-12-15T04:41:12,915 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121581582898c480447ca323ff4fe9791d07_3aea4ed87c2ca56ea7b1d05fdf98762f is 50, key is test_row_0/A:col10/1734237672690/Put/seqid=0 2024-12-15T04:41:12,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742430_1606 (size=12454) 2024-12-15T04:41:13,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-15T04:41:13,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-15T04:41:13,321 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-15T04:41:13,331 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024121581582898c480447ca323ff4fe9791d07_3aea4ed87c2ca56ea7b1d05fdf98762f to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121581582898c480447ca323ff4fe9791d07_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:13,332 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/6dcd4a18e6c94c94af7b3ef39530588f, store: [table=TestAcidGuarantees family=A region=3aea4ed87c2ca56ea7b1d05fdf98762f] 2024-12-15T04:41:13,332 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/6dcd4a18e6c94c94af7b3ef39530588f is 175, key is test_row_0/A:col10/1734237672690/Put/seqid=0 2024-12-15T04:41:13,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742431_1607 (size=31255) 2024-12-15T04:41:13,737 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=309, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/6dcd4a18e6c94c94af7b3ef39530588f 2024-12-15T04:41:13,742 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/10c5aca941de42f6b7d7b122a576e710 is 50, key is test_row_0/B:col10/1734237672690/Put/seqid=0 2024-12-15T04:41:13,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742432_1608 (size=12301) 2024-12-15T04:41:13,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-15T04:41:14,147 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/10c5aca941de42f6b7d7b122a576e710 2024-12-15T04:41:14,179 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/3f4a6e8b64de46f78055daf04d66c252 is 50, key is test_row_0/C:col10/1734237672690/Put/seqid=0 2024-12-15T04:41:14,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742433_1609 (size=12301) 2024-12-15T04:41:14,586 INFO 
[RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/3f4a6e8b64de46f78055daf04d66c252 2024-12-15T04:41:14,596 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/A/6dcd4a18e6c94c94af7b3ef39530588f as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/6dcd4a18e6c94c94af7b3ef39530588f 2024-12-15T04:41:14,601 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/6dcd4a18e6c94c94af7b3ef39530588f, entries=150, sequenceid=309, filesize=30.5 K 2024-12-15T04:41:14,603 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/B/10c5aca941de42f6b7d7b122a576e710 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/10c5aca941de42f6b7d7b122a576e710 2024-12-15T04:41:14,606 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/10c5aca941de42f6b7d7b122a576e710, entries=150, sequenceid=309, filesize=12.0 K 2024-12-15T04:41:14,607 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/.tmp/C/3f4a6e8b64de46f78055daf04d66c252 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3f4a6e8b64de46f78055daf04d66c252 2024-12-15T04:41:14,610 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3f4a6e8b64de46f78055daf04d66c252, entries=150, sequenceid=309, filesize=12.0 K 2024-12-15T04:41:14,611 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 3aea4ed87c2ca56ea7b1d05fdf98762f in 1703ms, sequenceid=309, compaction requested=false 2024-12-15T04:41:14,612 DEBUG 
[StoreCloser-TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/19629d6e5f0745b982c56278bfc22ba8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/350e5bdb29e94244bec86a2c3e7a40b9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/657b42659b5b4640a0a7aa1c3a82cf27, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/740bf57f0e974be3898e91b6e504836b, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/dba1658152dd40da8353007de8bad591, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/1e663ad022ba48b09570d58775208f46, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/d8ab357744314efdbc260bb6faa4fae4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/a366d367149846d8b634c3891a97a186, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/8983821de9d04f48a90463921c021795, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e6816a312873443db70ef89333d0214e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/ae78338ab8d0449f8c4d08fefb1260f1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/25311ab5b8da436c81f4adf816bb298a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e3304fde291c4a089907b9097c810483, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/74d555b74ddc4c01b773a755d48c41d4, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/edc94ddbe9084fe6b5982786b5214031, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9e8ea87539be40ef9fa8867fa64a45eb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/db1ed16784784a51a29beb8c848e6a50, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9f94ffd5c36f4e9fa6451099135d9591, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/813dbf433b16463b8a69712f783cf944, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/08b35907c72f4ab6b2f1f585f6f492fa, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0c62452ba27343779edc557b40897250] to archive 2024-12-15T04:41:14,613 DEBUG [StoreCloser-TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-15T04:41:14,615 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/350e5bdb29e94244bec86a2c3e7a40b9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/350e5bdb29e94244bec86a2c3e7a40b9 2024-12-15T04:41:14,616 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/657b42659b5b4640a0a7aa1c3a82cf27 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/657b42659b5b4640a0a7aa1c3a82cf27 2024-12-15T04:41:14,616 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/19629d6e5f0745b982c56278bfc22ba8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/19629d6e5f0745b982c56278bfc22ba8 2024-12-15T04:41:14,616 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/dba1658152dd40da8353007de8bad591 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/dba1658152dd40da8353007de8bad591 2024-12-15T04:41:14,616 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/740bf57f0e974be3898e91b6e504836b to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/740bf57f0e974be3898e91b6e504836b 2024-12-15T04:41:14,616 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/1e663ad022ba48b09570d58775208f46 to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/1e663ad022ba48b09570d58775208f46 2024-12-15T04:41:14,616 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/d8ab357744314efdbc260bb6faa4fae4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/d8ab357744314efdbc260bb6faa4fae4 2024-12-15T04:41:14,617 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/a366d367149846d8b634c3891a97a186 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/a366d367149846d8b634c3891a97a186 2024-12-15T04:41:14,617 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/8983821de9d04f48a90463921c021795 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/8983821de9d04f48a90463921c021795 2024-12-15T04:41:14,617 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e6816a312873443db70ef89333d0214e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e6816a312873443db70ef89333d0214e 2024-12-15T04:41:14,617 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/ae78338ab8d0449f8c4d08fefb1260f1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/ae78338ab8d0449f8c4d08fefb1260f1 2024-12-15T04:41:14,618 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e3304fde291c4a089907b9097c810483 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/e3304fde291c4a089907b9097c810483 2024-12-15T04:41:14,618 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/25311ab5b8da436c81f4adf816bb298a to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/25311ab5b8da436c81f4adf816bb298a 2024-12-15T04:41:14,618 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/74d555b74ddc4c01b773a755d48c41d4 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/74d555b74ddc4c01b773a755d48c41d4 2024-12-15T04:41:14,619 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/edc94ddbe9084fe6b5982786b5214031 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/edc94ddbe9084fe6b5982786b5214031 2024-12-15T04:41:14,619 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9e8ea87539be40ef9fa8867fa64a45eb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9e8ea87539be40ef9fa8867fa64a45eb 2024-12-15T04:41:14,619 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/db1ed16784784a51a29beb8c848e6a50 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/db1ed16784784a51a29beb8c848e6a50 2024-12-15T04:41:14,619 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/813dbf433b16463b8a69712f783cf944 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/813dbf433b16463b8a69712f783cf944 2024-12-15T04:41:14,619 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9f94ffd5c36f4e9fa6451099135d9591 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/9f94ffd5c36f4e9fa6451099135d9591 2024-12-15T04:41:14,620 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/08b35907c72f4ab6b2f1f585f6f492fa to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/08b35907c72f4ab6b2f1f585f6f492fa 2024-12-15T04:41:14,620 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0c62452ba27343779edc557b40897250 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0c62452ba27343779edc557b40897250 2024-12-15T04:41:14,621 DEBUG [StoreCloser-TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/6895276a7a4746188213579e62ab602f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/897f9a58de21497b957ffc056ad886bb, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/3092f3fdd5ec4f5b927edfa13bfa8c5e, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/766b77ceacc743e98e1bb147958d525a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/db4212097e85495287a9baf7427587d9, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/fdb41992d2cc496bb764d202b94b399d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/26b978a0fc284b34ad1d3d1a946018a7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/84a3ee74bb8e4d5aab805c86f33f7984, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c30114945dac41aa8167041d64677829, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9d97abb7a1e24e48aacd1ab3700742c1, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/73f96992fdc04b44b627b72a0665998f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c17f1db4df364815b805505df945a433, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/d99448d04c0d44d1815de4ad4ed7c65d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/72d4e6abc2614bbd9ca61928945196d8, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/523498be993f46c98e536dac6e4f37cd, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9bdc354f44d94d75a6a59fc37a6b318d, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/bfe08495cbe6472cb7ff9ae905d4da39, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/34e061923d65442d90fef23347fc5c02, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/5e7734fa8da34e3b89a3a00fcdb3d507, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b255e649c193491f859db79e504b048f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b2029f2264aa4905ae8e951038539dc2] to archive 2024-12-15T04:41:14,622 DEBUG [StoreCloser-TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/6895276a7a4746188213579e62ab602f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/6895276a7a4746188213579e62ab602f 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/db4212097e85495287a9baf7427587d9 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/db4212097e85495287a9baf7427587d9 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/766b77ceacc743e98e1bb147958d525a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/766b77ceacc743e98e1bb147958d525a 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/fdb41992d2cc496bb764d202b94b399d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/fdb41992d2cc496bb764d202b94b399d 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/3092f3fdd5ec4f5b927edfa13bfa8c5e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/3092f3fdd5ec4f5b927edfa13bfa8c5e 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/26b978a0fc284b34ad1d3d1a946018a7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/26b978a0fc284b34ad1d3d1a946018a7 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/84a3ee74bb8e4d5aab805c86f33f7984 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/84a3ee74bb8e4d5aab805c86f33f7984 2024-12-15T04:41:14,624 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/897f9a58de21497b957ffc056ad886bb to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/897f9a58de21497b957ffc056ad886bb 2024-12-15T04:41:14,625 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c17f1db4df364815b805505df945a433 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c17f1db4df364815b805505df945a433 2024-12-15T04:41:14,625 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9d97abb7a1e24e48aacd1ab3700742c1 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9d97abb7a1e24e48aacd1ab3700742c1 2024-12-15T04:41:14,626 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/73f96992fdc04b44b627b72a0665998f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/73f96992fdc04b44b627b72a0665998f 2024-12-15T04:41:14,626 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/523498be993f46c98e536dac6e4f37cd to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/523498be993f46c98e536dac6e4f37cd 2024-12-15T04:41:14,626 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c30114945dac41aa8167041d64677829 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/c30114945dac41aa8167041d64677829 2024-12-15T04:41:14,626 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9bdc354f44d94d75a6a59fc37a6b318d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/9bdc354f44d94d75a6a59fc37a6b318d 2024-12-15T04:41:14,626 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/72d4e6abc2614bbd9ca61928945196d8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/72d4e6abc2614bbd9ca61928945196d8 2024-12-15T04:41:14,626 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/d99448d04c0d44d1815de4ad4ed7c65d to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/d99448d04c0d44d1815de4ad4ed7c65d 2024-12-15T04:41:14,627 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/34e061923d65442d90fef23347fc5c02 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/34e061923d65442d90fef23347fc5c02 2024-12-15T04:41:14,627 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/bfe08495cbe6472cb7ff9ae905d4da39 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/bfe08495cbe6472cb7ff9ae905d4da39 2024-12-15T04:41:14,627 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/5e7734fa8da34e3b89a3a00fcdb3d507 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/5e7734fa8da34e3b89a3a00fcdb3d507 2024-12-15T04:41:14,627 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b2029f2264aa4905ae8e951038539dc2 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b2029f2264aa4905ae8e951038539dc2 2024-12-15T04:41:14,627 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b255e649c193491f859db79e504b048f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/b255e649c193491f859db79e504b048f 2024-12-15T04:41:14,631 DEBUG [StoreCloser-TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/0197c48f505b4cd69398a627bf3c90be, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3d75704c556b4498b6f300250cb3a771, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ab9ba3eee34b40d7aad68e431a329585, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/563284641da84f60ab04c580ccee99a7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/7df46f4a24db445f8b6969de65f7a554, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/fe285c5affa54fe3b5f2c550eee1fbed, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/1409b48705ed405280fe1b066c3b5d62, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/9f018f1043b6469fb6ec01e91c9e587a, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6fbf055e384b47d9b3047810a0e6c50f, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/4b9ec3d055964fe5b10925bef804b425, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/206ec7a4ff524dcf9f09fbccadb745ab, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6d2e5fdfd4f140c7b24a84d43e9e632c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/545fb0444087456983ba8c609ad4e5c0, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/c6c92f03d0934c81803bf30fda08d29c, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5bdebc7e23dc475d88beb181d6d02b84, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5356cdedce084654aba027742293e1e7, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5c1892a2173f46cb947aad45ea1fc947, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ff88905101094519b9b7205ce02d78b8, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/d71cd4d779904bdd84721d217d8adbec, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/08467e57358e4cb1983e3a9dc57521a6] to archive 2024-12-15T04:41:14,632 DEBUG [StoreCloser-TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.-1 {}] backup.HFileArchiver(363): Archiving compacted files. 
2024-12-15T04:41:14,633 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/563284641da84f60ab04c580ccee99a7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/563284641da84f60ab04c580ccee99a7 2024-12-15T04:41:14,633 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/fe285c5affa54fe3b5f2c550eee1fbed to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/fe285c5affa54fe3b5f2c550eee1fbed 2024-12-15T04:41:14,633 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/0197c48f505b4cd69398a627bf3c90be to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/0197c48f505b4cd69398a627bf3c90be 2024-12-15T04:41:14,634 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ab9ba3eee34b40d7aad68e431a329585 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ab9ba3eee34b40d7aad68e431a329585 2024-12-15T04:41:14,634 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/7df46f4a24db445f8b6969de65f7a554 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/7df46f4a24db445f8b6969de65f7a554 2024-12-15T04:41:14,634 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/1409b48705ed405280fe1b066c3b5d62 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/1409b48705ed405280fe1b066c3b5d62 2024-12-15T04:41:14,634 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3d75704c556b4498b6f300250cb3a771 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3d75704c556b4498b6f300250cb3a771 2024-12-15T04:41:14,634 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/9f018f1043b6469fb6ec01e91c9e587a to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/9f018f1043b6469fb6ec01e91c9e587a 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/206ec7a4ff524dcf9f09fbccadb745ab to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/206ec7a4ff524dcf9f09fbccadb745ab 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6fbf055e384b47d9b3047810a0e6c50f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6fbf055e384b47d9b3047810a0e6c50f 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/4b9ec3d055964fe5b10925bef804b425 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/4b9ec3d055964fe5b10925bef804b425 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6d2e5fdfd4f140c7b24a84d43e9e632c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/6d2e5fdfd4f140c7b24a84d43e9e632c 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/c6c92f03d0934c81803bf30fda08d29c to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/c6c92f03d0934c81803bf30fda08d29c 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5bdebc7e23dc475d88beb181d6d02b84 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5bdebc7e23dc475d88beb181d6d02b84 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/545fb0444087456983ba8c609ad4e5c0 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/545fb0444087456983ba8c609ad4e5c0 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5356cdedce084654aba027742293e1e7 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5356cdedce084654aba027742293e1e7 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ff88905101094519b9b7205ce02d78b8 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/ff88905101094519b9b7205ce02d78b8 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5c1892a2173f46cb947aad45ea1fc947 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/5c1892a2173f46cb947aad45ea1fc947 2024-12-15T04:41:14,635 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/d71cd4d779904bdd84721d217d8adbec to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/d71cd4d779904bdd84721d217d8adbec 2024-12-15T04:41:14,636 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileableStoreFile, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/08467e57358e4cb1983e3a9dc57521a6 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/08467e57358e4cb1983e3a9dc57521a6 2024-12-15T04:41:14,638 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/recovered.edits/312.seqid, newMaxSeqId=312, maxSeqId=4 2024-12-15T04:41:14,638 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f. 
2024-12-15T04:41:14,639 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] regionserver.HRegion(1635): Region close journal for 3aea4ed87c2ca56ea7b1d05fdf98762f: 2024-12-15T04:41:14,639 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION, pid=172}] handler.UnassignRegionHandler(170): Closed 3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,640 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=171 updating hbase:meta row=3aea4ed87c2ca56ea7b1d05fdf98762f, regionState=CLOSED 2024-12-15T04:41:14,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-12-15T04:41:14,642 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseRegionProcedure 3aea4ed87c2ca56ea7b1d05fdf98762f, server=e56de37b85b3,43199,1734237482035 in 1.8870 sec 2024-12-15T04:41:14,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-15T04:41:14,642 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3aea4ed87c2ca56ea7b1d05fdf98762f, UNASSIGN in 1.8920 sec 2024-12-15T04:41:14,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-12-15T04:41:14,643 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8970 sec 2024-12-15T04:41:14,644 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1734237674644"}]},"ts":"1734237674644"} 2024-12-15T04:41:14,645 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-12-15T04:41:14,683 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-12-15T04:41:14,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9900 sec 2024-12-15T04:41:14,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-12-15T04:41:14,806 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-12-15T04:41:14,807 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-12-15T04:41:14,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:41:14,811 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=173, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:41:14,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-15T04:41:14,812 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=173, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:41:14,816 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,820 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C, FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/recovered.edits] 2024-12-15T04:41:14,824 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0305c80097344be3a65aefe5daa38340 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/0305c80097344be3a65aefe5daa38340 2024-12-15T04:41:14,824 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/6dcd4a18e6c94c94af7b3ef39530588f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/A/6dcd4a18e6c94c94af7b3ef39530588f 2024-12-15T04:41:14,828 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/10c5aca941de42f6b7d7b122a576e710 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/10c5aca941de42f6b7d7b122a576e710 2024-12-15T04:41:14,828 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/1a710dcff3bf489caf9a5c34faca271e to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/B/1a710dcff3bf489caf9a5c34faca271e 2024-12-15T04:41:14,833 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/432064b33ab748599964f2ab5a86d701 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/432064b33ab748599964f2ab5a86d701 
2024-12-15T04:41:14,833 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3f4a6e8b64de46f78055daf04d66c252 to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/C/3f4a6e8b64de46f78055daf04d66c252 2024-12-15T04:41:14,836 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/recovered.edits/312.seqid to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f/recovered.edits/312.seqid 2024-12-15T04:41:14,837 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/default/TestAcidGuarantees/3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,837 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-12-15T04:41:14,837 DEBUG [PEWorker-2 {}] backup.HFileArchiver(133): ARCHIVING hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-15T04:41:14,838 DEBUG [PEWorker-2 {}] backup.HFileArchiver(161): Archiving [FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-12-15T04:41:14,843 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121507c58f3099444eb886576994951086b1_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121507c58f3099444eb886576994951086b1_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,843 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412153ae7bb8208b94591b83f3ae37b724482_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412153ae7bb8208b94591b83f3ae37b724482_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,843 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215641c77d8c1034e65b3d6a357f4d1efea_3aea4ed87c2ca56ea7b1d05fdf98762f to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215641c77d8c1034e65b3d6a357f4d1efea_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,843 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412154d055555fdfb442b9524014f931d4d91_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202412154d055555fdfb442b9524014f931d4d91_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,843 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121564b7e3fcf9a94248829dd9d21ff8c8e6_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121564b7e3fcf9a94248829dd9d21ff8c8e6_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,843 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121582d1afe7a6fd4d70b0eaa6f11ca43ed3_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121582d1afe7a6fd4d70b0eaa6f11ca43ed3_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,844 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121581582898c480447ca323ff4fe9791d07_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024121581582898c480447ca323ff4fe9791d07_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,844 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215827908f553d44627a4917aaa8505f971_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215827908f553d44627a4917aaa8505f971_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,844 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from 
FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215c81e7d971105477ea259dbd3dad38819_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215c81e7d971105477ea259dbd3dad38819_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,844 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b56a9bb4418d44999df03c89e0e8a467_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215b56a9bb4418d44999df03c89e0e8a467_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,845 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a935ac18ea924f9aadf1d9a32ed1fe2e_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a935ac18ea924f9aadf1d9a32ed1fe2e_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,845 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215d487b39172e843edb10a1ffe1e445524_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215d487b39172e843edb10a1ffe1e445524_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,845 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a16f196678384850ada6cff2d5ff3734_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215a16f196678384850ada6cff2d5ff3734_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,845 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215cea4176499714d548985bcdc2ebaba91_3aea4ed87c2ca56ea7b1d05fdf98762f to 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215cea4176499714d548985bcdc2ebaba91_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,845 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e446431fac8a4c61a0c95d4c5c408a7b_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e446431fac8a4c61a0c95d4c5c408a7b_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,845 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e5422355273d42a986db12a9ccea1ef4_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215e5422355273d42a986db12a9ccea1ef4_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,845 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(620): Archived from FileablePath, hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215f5c8e7af223044559f7fd55f08b0e3a1_3aea4ed87c2ca56ea7b1d05fdf98762f to hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241215f5c8e7af223044559f7fd55f08b0e3a1_3aea4ed87c2ca56ea7b1d05fdf98762f 2024-12-15T04:41:14,846 DEBUG [PEWorker-2 {}] backup.HFileArchiver(634): Deleted hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-12-15T04:41:14,848 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=173, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:41:14,850 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(371): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-12-15T04:41:14,851 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(408): Removing 'TestAcidGuarantees' descriptor. 2024-12-15T04:41:14,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=173, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:41:14,852 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(398): Removing 'TestAcidGuarantees' from region states. 
2024-12-15T04:41:14,852 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1734237674852"}]},"ts":"9223372036854775807"} 2024-12-15T04:41:14,853 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-15T04:41:14,853 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3aea4ed87c2ca56ea7b1d05fdf98762f, NAME => 'TestAcidGuarantees,,1734237639201.3aea4ed87c2ca56ea7b1d05fdf98762f.', STARTKEY => '', ENDKEY => ''}] 2024-12-15T04:41:14,854 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(402): Marking 'TestAcidGuarantees' as deleted. 2024-12-15T04:41:14,854 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1734237674854"}]},"ts":"9223372036854775807"} 2024-12-15T04:41:14,855 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-12-15T04:41:14,862 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=173, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-12-15T04:41:14,863 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 55 msec 2024-12-15T04:41:14,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35185 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-15T04:41:14,914 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-12-15T04:41:14,928 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithAdaptivePolicy#testMobGetAtomicity Thread=244 (was 244), OpenFileDescriptor=450 (was 449) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=303 (was 315), ProcessCount=11 (was 11), AvailableMemoryMB=4446 (was 4470) 2024-12-15T04:41:14,928 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-15T04:41:14,928 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-15T04:41:14,928 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e83c466 to 127.0.0.1:55935 2024-12-15T04:41:14,928 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:14,928 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-15T04:41:14,929 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=702506913, stopped=false 2024-12-15T04:41:14,929 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=e56de37b85b3,35185,1734237481331 2024-12-15T04:41:14,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-15T04:41:14,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-15T04:41:14,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:41:14,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:41:14,936 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-15T04:41:14,937 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:14,937 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'e56de37b85b3,43199,1734237482035' ***** 2024-12-15T04:41:14,937 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-15T04:41:14,937 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T04:41:14,937 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-15T04:41:14,937 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-15T04:41:14,938 INFO [RS:0;e56de37b85b3:43199 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-15T04:41:14,938 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-15T04:41:14,938 INFO [RS:0;e56de37b85b3:43199 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-15T04:41:14,938 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(3579): Received CLOSE for 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:41:14,938 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1224): stopping server e56de37b85b3,43199,1734237482035 2024-12-15T04:41:14,938 DEBUG [RS:0;e56de37b85b3:43199 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:14,939 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-15T04:41:14,939 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-15T04:41:14,939 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-15T04:41:14,939 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 7a581d77bd6cf0246603236a6705aded, disabling compactions & flushes 2024-12-15T04:41:14,939 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-15T04:41:14,939 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:41:14,939 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 7a581d77bd6cf0246603236a6705aded=hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded.} 2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. after waiting 0 ms 2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 
2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-15T04:41:14,939 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 7a581d77bd6cf0246603236a6705aded 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-15T04:41:14,939 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-15T04:41:14,939 INFO [regionserver/e56de37b85b3:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T04:41:14,939 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-15T04:41:14,940 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-12-15T04:41:14,942 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:41:14,955 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded/.tmp/info/8fd9a2d9a1804a4fba8d8a11a2a62e96 is 45, key is default/info:d/1734237487985/Put/seqid=0 2024-12-15T04:41:14,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742434_1610 (size=5037) 2024-12-15T04:41:14,959 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/info/e2eb0ffef4fe41149a645c19e4bdeaaa is 143, key is hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded./info:regioninfo/1734237487825/Put/seqid=0 2024-12-15T04:41:14,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742435_1611 (size=7725) 2024-12-15T04:41:15,043 INFO [regionserver/e56de37b85b3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-15T04:41:15,043 INFO [regionserver/e56de37b85b3:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-15T04:41:15,142 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:41:15,343 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 7a581d77bd6cf0246603236a6705aded 2024-12-15T04:41:15,360 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded/.tmp/info/8fd9a2d9a1804a4fba8d8a11a2a62e96 2024-12-15T04:41:15,364 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/info/e2eb0ffef4fe41149a645c19e4bdeaaa 2024-12-15T04:41:15,369 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded/.tmp/info/8fd9a2d9a1804a4fba8d8a11a2a62e96 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded/info/8fd9a2d9a1804a4fba8d8a11a2a62e96 2024-12-15T04:41:15,374 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded/info/8fd9a2d9a1804a4fba8d8a11a2a62e96, entries=2, sequenceid=6, filesize=4.9 K 2024-12-15T04:41:15,375 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 7a581d77bd6cf0246603236a6705aded in 435ms, sequenceid=6, compaction requested=false 2024-12-15T04:41:15,377 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/namespace/7a581d77bd6cf0246603236a6705aded/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-15T04:41:15,378 INFO [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 2024-12-15T04:41:15,378 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 7a581d77bd6cf0246603236a6705aded: 2024-12-15T04:41:15,378 DEBUG [RS_CLOSE_REGION-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1734237486528.7a581d77bd6cf0246603236a6705aded. 
2024-12-15T04:41:15,386 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/rep_barrier/818df4d6483e4427b8e1d2624501303d is 102, key is TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47./rep_barrier:/1734237518047/DeleteFamily/seqid=0 2024-12-15T04:41:15,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742436_1612 (size=6025) 2024-12-15T04:41:15,543 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-15T04:41:15,744 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-15T04:41:15,790 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/rep_barrier/818df4d6483e4427b8e1d2624501303d 2024-12-15T04:41:15,818 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/table/c82e6fb417144ca284e152ab4473220d is 96, key is TestAcidGuarantees,,1734237488277.6443b0fc7191a86cb86de2a8c7e17f47./table:/1734237518047/DeleteFamily/seqid=0 2024-12-15T04:41:15,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742437_1613 (size=5942) 2024-12-15T04:41:15,944 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-15T04:41:15,944 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-15T04:41:15,944 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-15T04:41:16,144 DEBUG [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-12-15T04:41:16,222 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/table/c82e6fb417144ca284e152ab4473220d 2024-12-15T04:41:16,232 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/info/e2eb0ffef4fe41149a645c19e4bdeaaa as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/info/e2eb0ffef4fe41149a645c19e4bdeaaa 2024-12-15T04:41:16,235 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/info/e2eb0ffef4fe41149a645c19e4bdeaaa, entries=22, sequenceid=93, filesize=7.5 K 2024-12-15T04:41:16,236 DEBUG 
[RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/rep_barrier/818df4d6483e4427b8e1d2624501303d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/rep_barrier/818df4d6483e4427b8e1d2624501303d 2024-12-15T04:41:16,240 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/rep_barrier/818df4d6483e4427b8e1d2624501303d, entries=6, sequenceid=93, filesize=5.9 K 2024-12-15T04:41:16,241 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/.tmp/table/c82e6fb417144ca284e152ab4473220d as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/table/c82e6fb417144ca284e152ab4473220d 2024-12-15T04:41:16,244 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/table/c82e6fb417144ca284e152ab4473220d, entries=9, sequenceid=93, filesize=5.8 K 2024-12-15T04:41:16,245 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1306ms, sequenceid=93, compaction requested=false 2024-12-15T04:41:16,249 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-12-15T04:41:16,249 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-15T04:41:16,249 INFO [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-15T04:41:16,249 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-15T04:41:16,249 DEBUG [RS_CLOSE_META-regionserver/e56de37b85b3:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-15T04:41:16,345 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1250): stopping server e56de37b85b3,43199,1734237482035; all regions closed. 
2024-12-15T04:41:16,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741834_1010 (size=26050) 2024-12-15T04:41:16,356 DEBUG [RS:0;e56de37b85b3:43199 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/oldWALs 2024-12-15T04:41:16,357 INFO [RS:0;e56de37b85b3:43199 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL e56de37b85b3%2C43199%2C1734237482035.meta:.meta(num 1734237486235) 2024-12-15T04:41:16,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741832_1008 (size=13540592) 2024-12-15T04:41:16,362 DEBUG [RS:0;e56de37b85b3:43199 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/oldWALs 2024-12-15T04:41:16,362 INFO [RS:0;e56de37b85b3:43199 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL e56de37b85b3%2C43199%2C1734237482035:(num 1734237485191) 2024-12-15T04:41:16,362 DEBUG [RS:0;e56de37b85b3:43199 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:16,362 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.LeaseManager(133): Closed leases 2024-12-15T04:41:16,362 INFO [RS:0;e56de37b85b3:43199 {}] hbase.ChoreService(370): Chore service for: regionserver/e56de37b85b3:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-15T04:41:16,363 INFO [regionserver/e56de37b85b3:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-15T04:41:16,363 INFO [RS:0;e56de37b85b3:43199 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43199 2024-12-15T04:41:16,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/e56de37b85b3,43199,1734237482035 2024-12-15T04:41:16,400 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-15T04:41:16,408 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [e56de37b85b3,43199,1734237482035] 2024-12-15T04:41:16,408 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing e56de37b85b3,43199,1734237482035; numProcessing=1 2024-12-15T04:41:16,416 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/e56de37b85b3,43199,1734237482035 already deleted, retry=false 2024-12-15T04:41:16,417 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; e56de37b85b3,43199,1734237482035 expired; onlineServers=0 2024-12-15T04:41:16,417 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server 'e56de37b85b3,35185,1734237481331' ***** 2024-12-15T04:41:16,417 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-15T04:41:16,417 DEBUG [M:0;e56de37b85b3:35185 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6956a245, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=e56de37b85b3/172.17.0.2:0 2024-12-15T04:41:16,417 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HRegionServer(1224): stopping server e56de37b85b3,35185,1734237481331 2024-12-15T04:41:16,417 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HRegionServer(1250): stopping server e56de37b85b3,35185,1734237481331; all regions closed. 2024-12-15T04:41:16,417 DEBUG [M:0;e56de37b85b3:35185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-15T04:41:16,417 DEBUG [M:0;e56de37b85b3:35185 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-15T04:41:16,418 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-15T04:41:16,418 DEBUG [M:0;e56de37b85b3:35185 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-15T04:41:16,418 DEBUG [master/e56de37b85b3:0:becomeActiveMaster-HFileCleaner.large.0-1734237484942 {}] cleaner.HFileCleaner(306): Exit Thread[master/e56de37b85b3:0:becomeActiveMaster-HFileCleaner.large.0-1734237484942,5,FailOnTimeoutGroup] 2024-12-15T04:41:16,418 DEBUG [master/e56de37b85b3:0:becomeActiveMaster-HFileCleaner.small.0-1734237484942 {}] cleaner.HFileCleaner(306): Exit Thread[master/e56de37b85b3:0:becomeActiveMaster-HFileCleaner.small.0-1734237484942,5,FailOnTimeoutGroup] 2024-12-15T04:41:16,418 INFO [M:0;e56de37b85b3:35185 {}] hbase.ChoreService(370): Chore service for: master/e56de37b85b3:0 had [] on shutdown 2024-12-15T04:41:16,418 DEBUG [M:0;e56de37b85b3:35185 {}] master.HMaster(1733): Stopping service threads 2024-12-15T04:41:16,418 INFO [M:0;e56de37b85b3:35185 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-15T04:41:16,418 ERROR [M:0;e56de37b85b3:35185 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:35921 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:35921,5,PEWorkerGroup] 2024-12-15T04:41:16,419 INFO [M:0;e56de37b85b3:35185 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-15T04:41:16,420 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-12-15T04:41:16,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-15T04:41:16,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-15T04:41:16,428 DEBUG [M:0;e56de37b85b3:35185 {}] zookeeper.ZKUtil(347): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-15T04:41:16,428 WARN [M:0;e56de37b85b3:35185 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-15T04:41:16,428 INFO [M:0;e56de37b85b3:35185 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-15T04:41:16,429 INFO [M:0;e56de37b85b3:35185 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-15T04:41:16,429 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-15T04:41:16,429 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T04:41:16,429 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T04:41:16,429 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-15T04:41:16,429 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-15T04:41:16,429 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-15T04:41:16,429 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=750.96 KB heapSize=922.17 KB 2024-12-15T04:41:16,450 DEBUG [M:0;e56de37b85b3:35185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c237a7593f6642ed9c1e07a7b97a32df is 82, key is hbase:meta,,1/info:regioninfo/1734237486344/Put/seqid=0 2024-12-15T04:41:16,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742438_1614 (size=5672) 2024-12-15T04:41:16,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T04:41:16,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43199-0x10027fb030d0001, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T04:41:16,508 INFO [RS:0;e56de37b85b3:43199 {}] regionserver.HRegionServer(1307): Exiting; stopping=e56de37b85b3,43199,1734237482035; zookeeper connection closed. 2024-12-15T04:41:16,509 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a7338b5 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a7338b5 2024-12-15T04:41:16,510 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-12-15T04:41:16,854 INFO [M:0;e56de37b85b3:35185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2107 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c237a7593f6642ed9c1e07a7b97a32df 2024-12-15T04:41:16,880 DEBUG [M:0;e56de37b85b3:35185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9be51774b2ba421e841fed33a3ff3172 is 2285, key is \x00\x00\x00\x00\x00\x00\x00\x94/proc:d/1734237642321/Put/seqid=0 2024-12-15T04:41:16,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742439_1615 (size=44074) 2024-12-15T04:41:17,286 INFO [M:0;e56de37b85b3:35185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=750.40 KB at sequenceid=2107 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9be51774b2ba421e841fed33a3ff3172 2024-12-15T04:41:17,295 INFO [M:0;e56de37b85b3:35185 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9be51774b2ba421e841fed33a3ff3172 2024-12-15T04:41:17,312 DEBUG [M:0;e56de37b85b3:35185 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a64fae2811034b73b9fe9da9fdd27b41 is 69, key is e56de37b85b3,43199,1734237482035/rs:state/1734237484971/Put/seqid=0 2024-12-15T04:41:17,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073742440_1616 (size=5156) 2024-12-15T04:41:17,716 INFO [M:0;e56de37b85b3:35185 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2107 (bloomFilter=true), to=hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a64fae2811034b73b9fe9da9fdd27b41 2024-12-15T04:41:17,726 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c237a7593f6642ed9c1e07a7b97a32df as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c237a7593f6642ed9c1e07a7b97a32df 2024-12-15T04:41:17,731 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c237a7593f6642ed9c1e07a7b97a32df, entries=8, sequenceid=2107, filesize=5.5 K 2024-12-15T04:41:17,732 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9be51774b2ba421e841fed33a3ff3172 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9be51774b2ba421e841fed33a3ff3172 2024-12-15T04:41:17,736 INFO [M:0;e56de37b85b3:35185 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 9be51774b2ba421e841fed33a3ff3172 2024-12-15T04:41:17,736 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9be51774b2ba421e841fed33a3ff3172, entries=173, sequenceid=2107, filesize=43.0 K 2024-12-15T04:41:17,737 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a64fae2811034b73b9fe9da9fdd27b41 as hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a64fae2811034b73b9fe9da9fdd27b41 2024-12-15T04:41:17,742 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:35921/user/jenkins/test-data/94ab43fd-9367-e896-4e24-9a104f2d19d9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a64fae2811034b73b9fe9da9fdd27b41, entries=1, sequenceid=2107, filesize=5.0 K 2024-12-15T04:41:17,743 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(3040): Finished flush of dataSize ~750.96 
KB/768981, heapSize ~921.88 KB/944000, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1314ms, sequenceid=2107, compaction requested=false 2024-12-15T04:41:17,745 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-15T04:41:17,745 DEBUG [M:0;e56de37b85b3:35185 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-15T04:41:17,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35109 is added to blk_1073741830_1006 (size=907151) 2024-12-15T04:41:17,749 INFO [M:0;e56de37b85b3:35185 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-12-15T04:41:17,749 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-15T04:41:17,749 INFO [M:0;e56de37b85b3:35185 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35185 2024-12-15T04:41:17,783 DEBUG [M:0;e56de37b85b3:35185 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/e56de37b85b3,35185,1734237481331 already deleted, retry=false 2024-12-15T04:41:17,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T04:41:17,892 INFO [M:0;e56de37b85b3:35185 {}] regionserver.HRegionServer(1307): Exiting; stopping=e56de37b85b3,35185,1734237481331; zookeeper connection closed. 2024-12-15T04:41:17,892 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35185-0x10027fb030d0000, quorum=127.0.0.1:55935, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-15T04:41:17,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-15T04:41:17,898 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-15T04:41:17,898 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-15T04:41:17,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-15T04:41:17,898 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/hadoop.log.dir/,STOPPED} 2024-12-15T04:41:17,901 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-15T04:41:17,901 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-15T04:41:17,901 WARN [BP-1422360966-172.17.0.2-1734237478337 heartbeating to localhost/127.0.0.1:35921 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-15T04:41:17,901 WARN [BP-1422360966-172.17.0.2-1734237478337 heartbeating to localhost/127.0.0.1:35921 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1422360966-172.17.0.2-1734237478337 (Datanode Uuid 03fa4d33-7e3e-4a2e-ae82-b3f01fbb1d68) service to localhost/127.0.0.1:35921 2024-12-15T04:41:17,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c/dfs/data/data1/current/BP-1422360966-172.17.0.2-1734237478337 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-15T04:41:17,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/cluster_45efc259-f993-f806-507f-4a5cf2d8323c/dfs/data/data2/current/BP-1422360966-172.17.0.2-1734237478337 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-15T04:41:17,904 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-15T04:41:17,912 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-15T04:41:17,913 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-15T04:41:17,913 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-15T04:41:17,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-15T04:41:17,913 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5c46c870-b5d3-1932-f7f7-a01b6eeb0190/hadoop.log.dir/,STOPPED} 2024-12-15T04:41:17,927 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-12-15T04:41:18,050 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down